diff --git a/src/UserGuide/Master/Table/Basic-Concept/Table-Management_apache.md b/src/UserGuide/Master/Table/Basic-Concept/Table-Management_apache.md index 1aacb9d3b..4b9a9c558 100644 --- a/src/UserGuide/Master/Table/Basic-Concept/Table-Management_apache.md +++ b/src/UserGuide/Master/Table/Basic-Concept/Table-Management_apache.md @@ -127,14 +127,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? **Examples:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -162,7 +167,9 @@ Used to view column names, data types, categories, and states of a table. **Examples:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -177,8 +184,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -215,7 +225,9 @@ SHOW CREATE TABLE **Example:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| 
+------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -232,12 +244,15 @@ Used to update a table, including adding or deleting columns and configuring tab **Syntax:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition; +#dropColumn; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **Note::** @@ -249,11 +264,11 @@ ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
col **Example:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 1.6 Delete Tables @@ -269,6 +284,6 @@ DROP TABLE (IF EXISTS)? **Examples:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` \ No newline at end of file diff --git a/src/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md b/src/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md index 112f90f6a..e42212e73 100644 --- a/src/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md +++ b/src/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md @@ -127,14 +127,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? **Examples:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -162,7 +167,9 @@ Used to view column names, data types, categories, and states of a table. 
**Examples:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -177,8 +184,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -214,7 +224,9 @@ SHOW CREATE TABLE **Example:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -231,12 +243,15 @@ Used to update a table, including adding or deleting columns and configuring tab **Syntax:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
column=columnDefinition; +#dropColumn; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **Note::** @@ -248,11 +263,11 @@ ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? col **Example:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 1.6 Delete Tables @@ -268,6 +283,6 @@ DROP TABLE (IF EXISTS)? **Examples:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` \ No newline at end of file diff --git a/src/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_apache.md b/src/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_apache.md index 193fa718b..729c38c39 100644 --- a/src/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_apache.md +++ b/src/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_apache.md @@ -110,7 +110,9 @@ try (ITableSession session = After execution, you can verify the table creation using the following command: ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +-----------+---------+-----------+ | ColumnName| DataType| Category| +-----------+---------+-----------+ @@ -131,9 +133,9 @@ It is possible to insert data for specific columns. 
Columns not specified will r **Example:** ```SQL -INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1) +INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:38:00', 91.0) +INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:38:00', 91.0); ``` ### 1.4 Null Value Insertion @@ -145,10 +147,10 @@ You can explicitly set `null` values for tag columns, attribute columns, and fie Equivalent to the above partial column insertion. ```SQL -# Equivalent to the example above -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1) +# Equivalent to the example above; +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null) +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null); ``` If no tag columns are included, the system will automatically create a device with all tag column values set to `null`. 
@@ -165,13 +167,13 @@ IoTDB supports inserting multiple rows of data in a single statement to improve INSERT INTO table1 VALUES ('2025-11-26 13:37:00', 'Frankfurt', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('2025-11-26 13:38:00', 'Frankfurt', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25') +('2025-11-26 13:38:00', 'Frankfurt', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25'); INSERT INTO table1 (region, plant_id, device_id, model_id, maintenance, time, temperature, humidity, status, arrival_time) VALUES ('Frankfurt', '1001', '100', 'A', '180', '2025-11-26 13:37:00', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('Frankfurt', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25') +('Frankfurt', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25'); ``` #### Notes @@ -201,7 +203,7 @@ Using the [sample data](../Reference/Sample-Data.md) as the data source, first c sql ```sql -IoTDB:database1> CREATE TABLE target_table ( time TIMESTAMP TIME, region STRING TAG, device_id STRING TAG, temperature FLOAT FIELD ); +CREATE TABLE target_table ( time TIMESTAMP TIME, region STRING TAG, device_id STRING TAG, temperature FLOAT FIELD ); Msg: The statement is executed successfully. ``` @@ -214,9 +216,13 @@ The `query` part is a direct `select ... from ...` query. sql ```sql -IoTDB:database1> insert into target_table select time,region,device_id,temperature from table1 where region = 'Beijing' +insert into target_table select time,region,device_id,temperature from table1 where region = 'Beijing'; Msg: The statement is executed successfully. 
-IoTDB:database1> select * from target_table where region='Beijing' +``` +```sql +select * from target_table where region='Beijing'; +``` +```shell +-----------------------------+--------+-----------+-------------+ | time| region| device_id| temperature| +-----------------------------+--------+-----------+-------------+ @@ -243,9 +249,13 @@ The `query` part uses the table reference syntax `table source_table`. sql ```sql -IoTDB:database1> insert into target_table(time,device_id,temperature) table table3 +insert into target_table(time,device_id,temperature) table table3; Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region is null +``` +```sql +select * from target_table where region is null; +``` +```shell +-----------------------------+------+-----------+-------------+ | time|region| device_id| temperature| +-----------------------------+------+-----------+-------------+ @@ -270,9 +280,13 @@ The `query` part is a parenthesized subquery. sql ```sql -IoTDB:database1> insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = 'Shanghai')) +insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = 'Shanghai')); Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region = 'Shanghai' +``` +```sql +select * from target_table where region = 'Shanghai'; +``` +```shell +-----------------------------+---------+-----------+-------------+ | time| region| device_id| temperature| +-----------------------------+---------+-----------+-------------+ @@ -339,13 +353,13 @@ INSERT INTO table1(time, device_id, s1) VALUES(NOW(), 'tag1', TO_OBJECT(TRUE, 0, 2. 
**Segmented write** ```SQL --- First write: TO_OBJECT(FALSE, 0, X'696F') +-- First write: TO_OBJECT(FALSE, 0, X'696F'); INSERT INTO table1(time, device_id, s1) VALUES(1, 'tag1', TO_OBJECT(FALSE, 0, X'696F')); --- Second write: TO_OBJECT(FALSE, 2, X'7464') +-- Second write: TO_OBJECT(FALSE, 2, X'7464'); INSERT INTO table1(time, device_id, s1) VALUES(1, 'tag1', TO_OBJECT(FALSE, 2, X'7464')); --- Third write: TO_OBJECT(TRUE, 4, X'62') +-- Third write: TO_OBJECT(TRUE, 4, X'62'); INSERT INTO table1(time, device_id, s1) VALUES(1, 'tag1', TO_OBJECT(TRUE, 4, X'62')); ``` @@ -379,5 +393,5 @@ updateAssignment **Example**: ```SQL -update table1 set b = a where substring(a, 1, 1) like '%' +update table1 set b = a where substring(a, 1, 1) like '%'; ``` diff --git a/src/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md b/src/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md index ef146a655..e6055f40d 100644 --- a/src/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md +++ b/src/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md @@ -110,7 +110,9 @@ try (ITableSession session = After execution, you can verify the table creation using the following command: ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +-----------+---------+-----------+ | ColumnName| DataType| Category| +-----------+---------+-----------+ @@ -131,9 +133,9 @@ It is possible to insert data for specific columns. 
Columns not specified will r **Example:** ```SQL -INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1) +INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:38:00', 91.0) +INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:38:00', 91.0); ``` ### 1.4 Null Value Insertion @@ -145,10 +147,10 @@ You can explicitly set `null` values for tag columns, attribute columns, and fie Equivalent to the above partial column insertion. ```SQL -# Equivalent to the example above -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1) +# Equivalent to the example above; +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null) +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null); ``` If no tag columns are included, the system will automatically create a device with all tag column values set to `null`. 
@@ -165,13 +167,13 @@ IoTDB supports inserting multiple rows of data in a single statement to improve INSERT INTO table1 VALUES ('2025-11-26 13:37:00', 'Frankfurt', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('2025-11-26 13:38:00', 'Frankfurt', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25') +('2025-11-26 13:38:00', 'Frankfurt', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25'); INSERT INTO table1 (region, plant_id, device_id, model_id, maintenance, time, temperature, humidity, status, arrival_time) VALUES ('Frankfurt', '1001', '100', 'A', '180', '2025-11-26 13:37:00', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('Frankfurt', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25') +('Frankfurt', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25'); ``` #### Notes @@ -201,7 +203,7 @@ Using the [sample data](../Reference/Sample-Data.md) as the data source, first c sql ```sql -IoTDB:database1> CREATE TABLE target_table ( time TIMESTAMP TIME, region STRING TAG, device_id STRING TAG, temperature FLOAT FIELD ); +CREATE TABLE target_table ( time TIMESTAMP TIME, region STRING TAG, device_id STRING TAG, temperature FLOAT FIELD ); Msg: The statement is executed successfully. ``` @@ -214,9 +216,13 @@ The `query` part is a direct `select ... from ...` query. sql ```sql -IoTDB:database1> insert into target_table select time,region,device_id,temperature from table1 where region = 'Beijing' +insert into target_table select time,region,device_id,temperature from table1 where region = 'Beijing'; Msg: The statement is executed successfully. 
-IoTDB:database1> select * from target_table where region='Beijing' +``` +```sql +select * from target_table where region='Beijing'; +``` +```shell +-----------------------------+--------+-----------+-------------+ | time| region| device_id| temperature| +-----------------------------+--------+-----------+-------------+ @@ -243,9 +249,13 @@ The `query` part uses the table reference syntax `table source_table`. sql ```sql -IoTDB:database1> insert into target_table(time,device_id,temperature) table table3 +insert into target_table(time,device_id,temperature) table table3; Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region is null +``` +```sql +select * from target_table where region is null; +``` +```shell +-----------------------------+------+-----------+-------------+ | time|region| device_id| temperature| +-----------------------------+------+-----------+-------------+ @@ -270,9 +280,13 @@ The `query` part is a parenthesized subquery. sql ```sql -IoTDB:database1> insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = 'Shanghai')) +insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = 'Shanghai')); Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region = 'Shanghai' +``` +```sql +select * from target_table where region = 'Shanghai'; +``` +```shell +-----------------------------+---------+-----------+-------------+ | time| region| device_id| temperature| +-----------------------------+---------+-----------+-------------+ @@ -339,13 +353,13 @@ INSERT INTO table1(time, device_id, s1) VALUES(NOW(), 'tag1', TO_OBJECT(TRUE, 0, 2. 
**Segmented write** ```SQL --- First write: TO_OBJECT(FALSE, 0, X'696F') +-- First write: TO_OBJECT(FALSE, 0, X'696F'); INSERT INTO table1(time, device_id, s1) VALUES(1, 'tag1', TO_OBJECT(FALSE, 0, X'696F')); --- Second write: TO_OBJECT(FALSE, 2, X'7464') +-- Second write: TO_OBJECT(FALSE, 2, X'7464'); INSERT INTO table1(time, device_id, s1) VALUES(1, 'tag1', TO_OBJECT(FALSE, 2, X'7464')); --- Third write: TO_OBJECT(TRUE, 4, X'62') +-- Third write: TO_OBJECT(TRUE, 4, X'62'); INSERT INTO table1(time, device_id, s1) VALUES(1, 'tag1', TO_OBJECT(TRUE, 4, X'62')); ``` @@ -378,5 +392,5 @@ updateAssignment **Example**: ```SQL -update table1 set b = a where substring(a, 1, 1) like '%' +update table1 set b = a where substring(a, 1, 1) like '%'; ``` diff --git a/src/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_apache.md b/src/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_apache.md index 00cb489d3..2246ab609 100644 --- a/src/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_apache.md +++ b/src/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_apache.md @@ -39,7 +39,7 @@ CREATE DATABASE (IF NOT EXISTS)? (WITH properties)? CREATE DATABASE database1; CREATE DATABASE IF NOT EXISTS database1; --- Create database with 1-year TTL +-- Create database with 1-year TTL; CREATE DATABASE IF NOT EXISTS database1 with(TTL=31536000000); ``` @@ -54,7 +54,7 @@ USE **Examples:** ```SQL -USE database1 +USE database1; ``` ### 1.3 View Current Database @@ -62,22 +62,26 @@ USE database1 **Syntax:** ```SQL -SHOW CURRENT_DATABASE +SHOW CURRENT_DATABASE; ``` **Examples:** ```SQL -IoTDB> SHOW CURRENT_DATABASE; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ | null| +---------------+ - -IoTDB> USE database1; - -IoTDB> SHOW CURRENT_DATABASE; +``` +```sql +USE database1; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ @@ -98,15 +102,20 @@ SHOW DATABASES (DETAILS)? 
**Examples:** ```SQL -IoTDB> show databases +show databases; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval| +------------------+-------+-----------------------+---------------------+---------------------+ | database1| INF| 1| 1| 604800000| |information_schema| INF| null| null| null| +------------------+-------+-----------------------+---------------------+---------------------+ - -IoTDB> show databases details +``` +```sql +show databases details; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|DataRegionGroupNum| +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ @@ -140,7 +149,7 @@ DROP DATABASE (IF EXISTS)? **Examples:** ```SQL -DROP DATABASE IF EXISTS database1 +DROP DATABASE IF EXISTS database1; ``` ## 2. Table Management @@ -218,14 +227,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? 
**Examples:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -244,7 +258,9 @@ IoTDB> show tables details from database1 **Examples:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -259,8 +275,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -289,7 +308,9 @@ SHOW CREATE TABLE **Examples:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -305,22 +326,25 @@ Total line number = 1 **Syntax:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
column=columnDefinition COMMENT 'column_comment' #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment'; +#dropColumn; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **Examples:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 2.6 Drop Table @@ -334,8 +358,8 @@ DROP TABLE (IF EXISTS)? 
**Examples:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` diff --git a/src/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md b/src/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md index 5c68a9b43..7de333850 100644 --- a/src/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md +++ b/src/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md @@ -39,7 +39,7 @@ CREATE DATABASE (IF NOT EXISTS)? (WITH properties)? CREATE DATABASE database1; CREATE DATABASE IF NOT EXISTS database1; --- Create database with 1-year TTL +-- Create database with 1-year TTL; CREATE DATABASE IF NOT EXISTS database1 with(TTL=31536000000); ``` @@ -54,7 +54,7 @@ USE **Examples:** ```SQL -USE database1 +USE database1; ``` ### 1.3 View Current Database @@ -62,22 +62,26 @@ USE database1 **Syntax:** ```SQL -SHOW CURRENT_DATABASE +SHOW CURRENT_DATABASE; ``` **Examples:** ```SQL -IoTDB> SHOW CURRENT_DATABASE; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ | null| +---------------+ - -IoTDB> USE database1; - -IoTDB> SHOW CURRENT_DATABASE; +``` +```sql +USE database1; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ @@ -98,15 +102,20 @@ SHOW DATABASES (DETAILS)? 
**Examples:** ```SQL -IoTDB> show databases +show databases; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval| +------------------+-------+-----------------------+---------------------+---------------------+ | database1| INF| 1| 1| 604800000| |information_schema| INF| null| null| null| +------------------+-------+-----------------------+---------------------+---------------------+ - -IoTDB> show databases details +``` +```sql +show databases details; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|DataRegionGroupNum| +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ @@ -140,7 +149,7 @@ DROP DATABASE (IF EXISTS)? **Examples:** ```SQL -DROP DATABASE IF EXISTS database1 +DROP DATABASE IF EXISTS database1; ``` ## 2. Table Management @@ -218,14 +227,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? 
**Examples:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -244,7 +258,9 @@ IoTDB> show tables details from database1 **Examples:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -259,8 +275,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -289,7 +308,9 @@ SHOW CREATE TABLE **Examples:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -304,22 +325,25 @@ Total line number = 1 **Syntax:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
column=columnDefinition COMMENT 'column_comment' #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment'; +#dropColumn; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **Examples:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 2.6 Drop Table @@ -333,8 +357,8 @@ DROP TABLE (IF EXISTS)? 
**Examples:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` diff --git a/src/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_apache.md b/src/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_apache.md index d4c6e51dd..10cf8b1ca 100644 --- a/src/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_apache.md +++ b/src/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_apache.md @@ -27,9 +27,9 @@ According to the storage model we can set up the corresponding database. Two SQL statements are supported for creating databases, as follows: -``` -IoTDB > create database root.ln -IoTDB > create database root.sgcc +```sql +create database root.ln; +create database root.sgcc; ``` We can thus create two databases using the above two SQL statements. @@ -38,11 +38,11 @@ It is worth noting that 1 database is recommended. When the path itself or the parent/child layer of the path is already created as database, the path is then not allowed to be created as database. For example, it is not feasible to create `root.ln.wf01` as database when two databases `root.ln` and `root.sgcc` exist. The system gives the corresponding error prompt as shown below: -``` -IoTDB> CREATE DATABASE root.ln.wf01 -Msg: 300: root.ln has already been created as database. -IoTDB> create database root.ln.wf01 -Msg: 300: root.ln has already been created as database. +```sql +CREATE DATABASE root.ln.wf01; +Msg: 300: root.ln has already been created as database. +create database root.ln.wf01; +Msg: 300: root.ln has already been created as database. ``` Database Node Naming Rules: @@ -59,9 +59,9 @@ Besides, if deploy on Windows system, the LayerName is case-insensitive, which m After creating the database, we can use the [SHOW DATABASES](../SQL-Manual/SQL-Manual_apache) statement and [SHOW DATABASES \](../SQL-Manual/SQL-Manual_apache) to view the databases.
The SQL statements are as follows: -``` -IoTDB> SHOW DATABASES -IoTDB> SHOW DATABASES root.** +```sql +SHOW DATABASES; +SHOW DATABASES root.**; ``` The result is as follows: @@ -81,11 +81,11 @@ It costs 0.060s User can use the `DELETE DATABASE ` statement to delete all databases matching the pathPattern. Please note the data in the database will also be deleted. -``` -IoTDB > DELETE DATABASE root.ln -IoTDB > DELETE DATABASE root.sgcc -// delete all data, all timeseries and all databases -IoTDB > DELETE DATABASE root.** +```sql +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +// delete all data, all timeseries and all databases +DELETE DATABASE root.**; ``` ### 1.4 Count Databases @@ -94,11 +94,11 @@ User can use the `COUNT DATABASE ` statement to count the number of SQL statement is as follows: -``` -IoTDB> count databases -IoTDB> count databases root.* -IoTDB> count databases root.sgcc.* -IoTDB> count databases root.sgcc +```sql +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` The result is as follows: @@ -176,13 +176,13 @@ Users can set any heterogeneous parameters when creating a Database, or adjust s The user can set any of the above heterogeneous parameters when creating a Database. The SQL statement is as follows: -``` +```sql CREATE DATABASE prefixPath (WITH databaseAttributeClause (COMMA? databaseAttributeClause)*)? ``` For example: -``` +```sql CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTOR=3, SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -190,13 +190,13 @@ CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTO Users can adjust some heterogeneous parameters during the IoTDB runtime, as shown in the following SQL statement: -``` +```sql ALTER DATABASE prefixPath WITH databaseAttributeClause (COMMA?
databaseAttributeClause)* ``` For example: -``` +```sql ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -209,14 +209,14 @@ Note that only the following heterogeneous parameters can be adjusted at runtime The user can query the specific heterogeneous configuration of each Database, and the SQL statement is as follows: -``` +```sql SHOW DATABASES DETAILS prefixPath? ``` For example: -``` -IoTDB> SHOW DATABASES DETAILS +```sql +SHOW DATABASES DETAILS +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ |Database| TTL|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|MinSchemaRegionGroupNum|MaxSchemaRegionGroupNum|DataRegionGroupNum|MinDataRegionGroupNum|MaxDataRegionGroupNum| +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ @@ -278,7 +278,7 @@ The set ttl operation can be understood as setting a TTL rule, for example, sett The unset ttl operation indicates unmounting TTL for the corresponding path pattern; if there is no corresponding TTL, nothing will be done. If you want to set TTL to be infinitely large, you can use the INF keyword. The SQL Statement for setting TTL is as follow: -``` +```sql set ttl to pathPattern 360000; ``` Set the Time to Live (TTL) to a pathPattern of 360,000 milliseconds; the pathPattern should not contain a wildcard (\*) in the middle and must end with a double asterisk (\*\*). The pathPattern is used to match corresponding devices. @@ -289,25 +289,25 @@ It is also permissible to specify a particular device without a wildcard (*). 
To unset TTL, we can use follwing SQL statement: -``` -IoTDB> unset ttl from root.ln +```sql +unset ttl from root.ln ``` After unset TTL, all data will be accepted in `root.ln`. -``` -IoTDB> unset ttl from root.sgcc.** +```sql +unset ttl from root.sgcc.** ``` Unset the TTL in the `root.sgcc` path. New syntax -``` -IoTDB> unset ttl from root.** +```sql +unset ttl from root.** ``` Old syntax -``` -IoTDB> unset ttl to root.** +```sql +unset ttl to root.** ``` There is no functional difference between the old and new syntax, and they are compatible with each other. The new syntax is just more conventional in terms of wording. @@ -320,8 +320,8 @@ To Show TTL, we can use following SQL statement: show all ttl -``` -IoTDB> SHOW ALL TTL +```sql +SHOW ALL TTL +--------------+--------+ | path| TTL| | root.**|55555555| @@ -330,8 +330,8 @@ IoTDB> SHOW ALL TTL ``` show ttl on pathPattern -``` -IoTDB> SHOW TTL ON root.db.**; +```sql +SHOW TTL ON root.db.**; +--------------+--------+ | path| TTL| | root.db.**|55555555| @@ -343,8 +343,8 @@ The SHOW ALL TTL example gives the TTL for all path patterns. The SHOW TTL ON pathPattern shows the TTL for the path pattern specified. Display devices' ttl -``` -IoTDB> show devices +```sql +show devices +---------------+---------+---------+ | Device|IsAligned| TTL| +---------------+---------+---------+ @@ -372,22 +372,22 @@ CREATE DEVICE TEMPLATE ALIGNED? '(' create device template t1 (temperature FLOAT, status BOOLEAN) +```sql +create device template t1 (temperature FLOAT, status BOOLEAN) ``` **Example 2:** Create a template containing a group of aligned timeseries -```shell -IoTDB> create device template t2 aligned (lat FLOAT, lon FLOAT) +```sql +create device template t2 aligned (lat FLOAT, lon FLOAT) ``` The` lat` and `lon` measurements are aligned. When creating a template, the system will automatically assign default encoding and compression methods, requiring no manual specification. 
If your business scenario requires custom adjustments, you may refer to the following example: -```shell -IoTDB> create device template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) +```sql +create device template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) ``` For a full list of supported data types and corresponding encoding methods, please refer to [Compression & Encoding](../Technical-Insider/Encoding-and-Compression.md)。 @@ -404,8 +404,8 @@ After a device template is created, it should be set to specific path before cre The SQL Statement for setting device template is as follow: -```shell -IoTDB> set device template t1 to root.sg1.d1 +```sql +set device template t1 to root.sg1.d1 ``` ### 2.3 Activate Device Template @@ -415,17 +415,17 @@ After setting the device template, with the system enabled to auto create schema **Attention**: Before inserting data or the system not enabled to auto create schema, timeseries defined by the device template will not be created. 
You can use the following SQL statement to create the timeseries or activate the templdeviceate, act before inserting data: -```shell -IoTDB> create timeseries using device template on root.sg1.d1 +```sql +create timeseries using device template on root.sg1.d1 ``` **Example:** Execute the following statement -```shell -IoTDB> set device template t1 to root.sg1.d1 -IoTDB> set device template t2 to root.sg1.d2 -IoTDB> create timeseries using device template on root.sg1.d1 -IoTDB> create timeseries using device template on root.sg1.d2 +```sql +set device template t1 to root.sg1.d1; +set device template t2 to root.sg1.d2; +create timeseries using device template on root.sg1.d1; +create timeseries using device template on root.sg1.d2; ``` Show the time series: @@ -434,7 +434,7 @@ Show the time series: show timeseries root.sg1.** ```` -```shell +```sql +-----------------------+-----+-------------+--------+--------+-----------+----+----------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression|tags|attributes|deadband|deadband parameters| +-----------------------+-----+-------------+--------+--------+-----------+----+----------+--------+-------------------+ @@ -451,7 +451,7 @@ Show the devices: show devices root.sg1.** ```` -```shell +```sql +---------------+---------+ | devices|isAligned| +---------------+---------+ @@ -466,13 +466,13 @@ show devices root.sg1.** The SQL statement looks like this: -```shell -IoTDB> show device templates +```sql +show device templates ``` The execution result is as follows: -```shell +```sql +-------------+ |template name| +-------------+ @@ -485,13 +485,13 @@ The execution result is as follows: The SQL statement looks like this: -```shell -IoTDB> show nodes in device template t1 +```sql +show nodes in device template t1 ``` The execution result is as follows: -```shell +```sql +-----------+--------+--------+-----------+ |child nodes|dataType|encoding|compression| 
+-----------+--------+--------+-----------+ @@ -502,13 +502,13 @@ The execution result is as follows: - Show the path prefix where a device template is set -```shell -IoTDB> show paths set device template t1 +```sql +show paths set device template t1 ``` The execution result is as follows: -```shell +```sql +-----------+ |child paths| +-----------+ @@ -518,13 +518,13 @@ The execution result is as follows: - Show the path prefix where a device template is used (i.e. the time series has been created) -```shell -IoTDB> show paths using device template t1 +```sql +show paths using device template t1 ``` The execution result is as follows: -```shell +```sql +-----------+ |child paths| +-----------+ @@ -536,26 +536,26 @@ The execution result is as follows: To delete a group of timeseries represented by device template, namely deactivate the device template, use the following SQL statement: -```shell -IoTDB> delete timeseries of device template t1 from root.sg1.d1 +```sql +delete timeseries of device template t1 from root.sg1.d1 ``` or -```shell -IoTDB> deactivate device template t1 from root.sg1.d1 +```sql +deactivate device template t1 from root.sg1.d1 ``` The deactivation supports batch process. -```shell -IoTDB> delete timeseries of device template t1 from root.sg1.*, root.sg2.* +```sql +delete timeseries of device template t1 from root.sg1.*, root.sg2.* ``` or -```shell -IoTDB> deactivate device template t1 from root.sg1.*, root.sg2.* +```sql +deactivate device template t1 from root.sg1.*, root.sg2.* ``` If the template name is not provided in sql, all template activation on paths matched by given path pattern will be removed. 
@@ -564,8 +564,8 @@ If the template name is not provided in sql, all template activation on paths ma The SQL Statement for unsetting device template is as follow: -```shell -IoTDB> unset device template t1 from root.sg1.d1 +```sql +unset device template t1 from root.sg1.d1 ``` **Attention**: It should be guaranteed that none of the timeseries represented by the target device template exists, before unset it. It can be achieved by deactivation operation. @@ -574,8 +574,8 @@ IoTDB> unset device template t1 from root.sg1.d1 The SQL Statement for dropping device template is as follow: -```shell -IoTDB> drop device template t1 +```sql +drop device template t1 ``` **Attention**: Dropping an already set template is not supported. @@ -586,8 +586,8 @@ In a scenario where measurements need to be added, you can modify the template The SQL Statement for altering device template is as follow: -```shell -IoTDB> alter device template t1 add (speed FLOAT) +```sql +alter device template t1 add (speed FLOAT) ``` **When executing data insertion to devices with device template set on related prefix path and there are measurements not present in this device template, the measurements will be auto added to this device template.** @@ -598,36 +598,36 @@ IoTDB> alter device template t1 add (speed FLOAT) According to the storage model selected before, we can create corresponding timeseries in the two databases respectively. 
The SQL statements for creating timeseries are as follows: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +```sql +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` From v0.13, you can use a simplified version of the SQL statements to create timeseries: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT +```sql +create timeseries root.ln.wf01.wt01.status BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature FLOAT; +create timeseries root.ln.wf02.wt02.hardware TEXT; +create timeseries root.ln.wf02.wt02.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature FLOAT; ``` When creating a timeseries, the system will automatically assign default encoding and compression methods, requiring no manual specification. 
If your business scenario requires custom adjustments, you may refer to the following example: -```shell -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY +```sql +create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY ``` Note that if you manually specify an encoding method that is incompatible with the data type, the system will return an error message, as shown below: -``` -IoTDB > create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF +```sql +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF; error: encoding TS_2DIFF does not support BOOLEAN ``` @@ -637,8 +637,8 @@ For a full list of supported data types and corresponding encoding methods, plea The SQL statement for creating a group of timeseries are as follows: -``` -IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) +```sql +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) ``` You can set different datatype, encoding, and compression for the timeseries in a group of aligned timeseries @@ -651,11 +651,11 @@ To delete the timeseries we created before, we are able to use `(DELETE | DROP) The usage are as follows: -``` -IoTDB> delete timeseries root.ln.wf01.wt01.status -IoTDB> delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -IoTDB> delete timeseries root.ln.wf02.* -IoTDB> drop timeseries root.ln.wf02.* +```sql +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` ### 3.4 Show Timeseries @@ -676,9 +676,9 @@ Examples: returns all timeseries information matching the given <`PathPattern`>. 
SQL statements are as follows: -``` -IoTDB> show timeseries root.** -IoTDB> show timeseries root.ln.** +```sql +show timeseries root.**; +show timeseries root.ln.**; ``` The results are shown below respectively: @@ -714,7 +714,7 @@ It costs 0.004s returns all the timeseries information start from the offset and limit the number of series returned. For example, -``` +```sql show timeseries root.ln.** limit 10 offset 10 ``` @@ -722,7 +722,7 @@ show timeseries root.ln.** limit 10 offset 10 The query result set is filtered by string fuzzy matching based on the names of the timeseries. For example: -``` +```sql show timeseries root.ln.** where timeseries contains 'wf01.wt' ``` @@ -743,7 +743,7 @@ It costs 0.016s The query result set is filtered by data type. For example: -``` +```sql show timeseries root.ln.** where dataType=FLOAT ``` @@ -767,9 +767,9 @@ It costs 0.016s The query result set is filtered by tags. For example: -``` -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` The query results are as follows: @@ -811,16 +811,16 @@ IoTDB is able to use `COUNT TIMESERIES ` to count the number of timeseries * `LEVEL` could be defined to show count the number of timeseries of each node at the given level in current Metadata Tree. This could be used to query the number of sensors under each device. The grammar is: `COUNT TIMESERIES GROUP BY LEVEL=`. 
-``` -IoTDB > COUNT TIMESERIES root.** -IoTDB > COUNT TIMESERIES root.ln.** -IoTDB > COUNT TIMESERIES root.ln.*.*.status -IoTDB > COUNT TIMESERIES root.ln.wf01.wt01.status -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -IoTDB > COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 +```sql +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' ; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' ; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' ; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; ``` For example, if there are several timeseries (use `show timeseries` to show all timeseries): @@ -847,10 +847,10 @@ Then the Metadata Tree will be as below: As can be seen, `root` is considered as `LEVEL=0`. 
So when you enter statements such as: -``` -IoTDB > COUNT TIMESERIES root.** GROUP BY LEVEL=1 -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +```sql +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` You will get following results: @@ -897,7 +897,7 @@ The differences between tag and attribute are: The SQL statements for creating timeseries with extra tag and attribute information are extended as follows: -``` +```sql create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) ``` @@ -911,31 +911,31 @@ We can update the tag information after creating it as following: * Rename the tag/attribute key -``` +```sql ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 ``` * Reset the tag/attribute value -``` +```sql ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 ``` * Delete the existing tag/attribute -``` +```sql ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 ``` * Add new tags -``` +```sql ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 ``` * Add new attributes -``` +```sql ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 ``` @@ -943,23 +943,23 @@ ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 > add alias or a new key-value if the alias or key doesn't exist, otherwise, update the old one with new value. -``` +```sql ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4) ``` * Show timeseries using tags. Use TAGS(tagKey) to identify the tags used as filter key -``` +```sql SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause ``` returns all the timeseries information that satisfy the where condition and match the pathPattern. 
SQL statements are as follows: -``` -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` The results are shown below respectly: @@ -984,23 +984,23 @@ It costs 0.004s - count timeseries using tags -``` -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +```sql +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL=; ``` returns all the number of timeseries that satisfy the where condition and match the pathPattern. 
SQL statements are as follows: -``` -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +```sql +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` The results are shown below respectly : ``` -IoTDB> count timeseries +count timeseries +-----------------+ |count(timeseries)| +-----------------+ @@ -1008,7 +1008,7 @@ IoTDB> count timeseries +-----------------+ Total line number = 1 It costs 0.019s -IoTDB> count timeseries root.** where TAGS(unit)='c' +count timeseries root.** where TAGS(unit)='c' +-----------------+ |count(timeseries)| +-----------------+ @@ -1016,7 +1016,7 @@ IoTDB> count timeseries root.** where TAGS(unit)='c' +-----------------+ Total line number = 1 It costs 0.020s -IoTDB> count timeseries root.** where TAGS(unit)='c' group by level = 2 +count timeseries root.** where TAGS(unit)='c' group by level = 2 +--------------+-----------------+ | column|count(timeseries)| +--------------+-----------------+ @@ -1032,14 +1032,14 @@ It costs 0.011s create aligned timeseries -``` +```sql create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) ``` The execution result is as follows: -``` -IoTDB> show timeseries +```sql +show timeseries +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -1050,8 +1050,8 @@ IoTDB> show timeseries Support query: -``` -IoTDB> show timeseries where TAGS(tag1)='v1' +```sql +show timeseries where TAGS(tag1)='v1' 
+--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -1117,7 +1117,7 @@ To make it more convenient and efficient to express multiple time series, IoTDB ### 4.5 Show Child Paths -``` +```sql SHOW CHILD PATHS pathPattern ``` @@ -1145,7 +1145,7 @@ It costs 0.002s ### 4.6 Show Child Nodes -``` +```sql SHOW CHILD NODES pathPattern ``` @@ -1182,11 +1182,11 @@ IoTDB is able to use `COUNT NODES LEVEL=` to count the nu This could be used to query the number of devices with specified measurements. The usage are as follows: -``` -IoTDB > COUNT NODES root.** LEVEL=2 -IoTDB > COUNT NODES root.ln.** LEVEL=2 -IoTDB > COUNT NODES root.ln.wf01.** LEVEL=3 -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +```sql +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.** LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` As for the above mentioned example and Metadata tree, you can get following results: @@ -1239,10 +1239,10 @@ Similar to `Show Timeseries`, IoTDB also supports two ways of viewing devices: SQL statement is as follows: -``` -IoTDB> show devices -IoTDB> show devices root.ln.** -IoTDB> show devices root.ln.** where device contains 't' +```sql +show devices; +show devices root.ln.**; +show devices root.ln.** where device contains 't'; ``` You can get results below: @@ -1279,9 +1279,9 @@ To view devices' information with database, we can use `SHOW DEVICES WITH DATABA SQL statement is as follows: -``` -IoTDB> show devices with database -IoTDB> show devices root.ln.** with database +```sql +show devices with database; +show devices root.ln.** with database; ``` You can get results below: 
@@ -1316,10 +1316,10 @@ The above statement is used to count the number of devices. At the same time, it SQL statement is as follows: -``` -IoTDB> show devices -IoTDB> count devices -IoTDB> count devices root.ln.** +```sql +show devices; +count devices; +count devices root.ln.**; ``` You can get results below: diff --git a/src/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md b/src/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md index 5bc7d549b..d3ec0bfe3 100644 --- a/src/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md +++ b/src/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md @@ -27,9 +27,9 @@ According to the storage model we can set up the corresponding database. Two SQL statements are supported for creating databases, as follows: -``` -IoTDB > create database root.ln -IoTDB > create database root.sgcc +```sql +create database root.ln; +create database root.sgcc; ``` We can thus create two databases using the above two SQL statements. @@ -38,11 +38,11 @@ It is worth noting that 1 database is recommended. When the path itself or the parent/child layer of the path is already created as database, the path is then not allowed to be created as database. For example, it is not feasible to create `root.ln.wf01` as database when two databases `root.ln` and `root.sgcc` exist. The system gives the corresponding error prompt as shown below: -``` -IoTDB> CREATE DATABASE root.ln.wf01 -Msg: 300: root.ln has already been created as database. -IoTDB> create database root.ln.wf01 -Msg: 300: root.ln has already been created as database. 
+```sql +CREATE DATABASE root.ln.wf01; +Msg: 300: root.ln has already been created as database. +create database root.ln.wf01; +Msg: 300: root.ln has already been created as database. ``` Database Node Naming Rules: @@ -59,9 +59,9 @@ Besides, if deploy on Windows system, the LayerName is case-insensitive, which m After creating the database, we can use the [SHOW DATABASES](../SQL-Manual/SQL-Manual_timecho) statement and [SHOW DATABASES \](../SQL-Manual/SQL-Manual_timecho) to view the databases. The SQL statements are as follows: -``` -IoTDB> SHOW DATABASES -IoTDB> SHOW DATABASES root.** +```sql +SHOW DATABASES; +SHOW DATABASES root.**; ``` The result is as follows: @@ -81,11 +81,11 @@ It costs 0.060s User can use the `DELETE DATABASE ` statement to delete all databases matching the pathPattern. Please note the data in the database will also be deleted. -``` -IoTDB > DELETE DATABASE root.ln -IoTDB > DELETE DATABASE root.sgcc -// delete all data, all timeseries and all databases -IoTDB > DELETE DATABASE root.** +```sql +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +// delete all data, all timeseries and all databases +DELETE DATABASE root.**; ``` ### 1.4 Count Databases @@ -94,11 +94,11 @@ User can use the `COUNT DATABASE ` statement to count the number of SQL statement is as follows: -``` -IoTDB> count databases -IoTDB> count databases root.* -IoTDB> count databases root.sgcc.* -IoTDB> count databases root.sgcc +```sql +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` The result is as follows: @@ -176,13 +176,13 @@ Users can set any heterogeneous parameters when creating a Database, or adjust s The user can set any of the above heterogeneous parameters when creating a Database. The SQL statement is as follows: -``` +```sql CREATE DATABASE prefixPath (WITH databaseAttributeClause (COMMA? databaseAttributeClause)*)?
``` For example: -``` +```sql CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTOR=3, SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -190,13 +190,13 @@ CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTO Users can adjust some heterogeneous parameters during the IoTDB runtime, as shown in the following SQL statement: -``` -ALTER DATABASE prefixPath WITH databaseAttributeClause (COMMA? databaseAttributeClause)* +```sql +ALTER DATABASE prefixPath WITH databaseAttributeClause (COMMA? databaseAttributeClause)*; ``` For example: -``` +```sql ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -209,14 +209,16 @@ Note that only the following heterogeneous parameters can be adjusted at runtime The user can query the specific heterogeneous configuration of each Database, and the SQL statement is as follows: -``` +```sql SHOW DATABASES DETAILS prefixPath? ``` For example: +```sql +SHOW DATABASES DETAILS +``` ``` -IoTDB> SHOW DATABASES DETAILS +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ |Database| TTL|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|MinSchemaRegionGroupNum|MaxSchemaRegionGroupNum|DataRegionGroupNum|MinDataRegionGroupNum|MaxDataRegionGroupNum| +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ @@ -278,7 +280,8 @@ The set ttl operation can be understood as setting a TTL rule, for example, sett The unset ttl operation indicates unmounting TTL for the corresponding path pattern; if there is no corresponding TTL, nothing will be done. 
If you want to set TTL to be infinitely large, you can use the INF keyword. The SQL Statement for setting TTL is as follow: -``` + +```sql set ttl to pathPattern 360000; ``` Set the Time to Live (TTL) to a pathPattern of 360,000 milliseconds; the pathPattern should not contain a wildcard (\*) in the middle and must end with a double asterisk (\*\*). The pathPattern is used to match corresponding devices. @@ -289,25 +292,28 @@ It is also permissible to specify a particular device without a wildcard (*). To unset TTL, we can use follwing SQL statement: -``` -IoTDB> unset ttl from root.ln +```sql +unset ttl from root.ln ``` After unset TTL, all data will be accepted in `root.ln`. -``` -IoTDB> unset ttl from root.sgcc.** + +```sql +unset ttl from root.sgcc.** ``` Unset the TTL in the `root.sgcc` path. New syntax -``` -IoTDB> unset ttl from root.** + +```sql +unset ttl from root.** ``` Old syntax -``` -IoTDB> unset ttl to root.** + +```sql +unset ttl to root.** ``` There is no functional difference between the old and new syntax, and they are compatible with each other. The new syntax is just more conventional in terms of wording. @@ -320,8 +326,10 @@ To Show TTL, we can use following SQL statement: show all ttl +```sql +SHOW ALL TTL; +``` ``` -IoTDB> SHOW ALL TTL +--------------+--------+ | path| TTL| | root.**|55555555| @@ -330,8 +338,10 @@ IoTDB> SHOW ALL TTL ``` show ttl on pathPattern +```sql +SHOW TTL ON root.db.**; +``` ``` -IoTDB> SHOW TTL ON root.db.**; +--------------+--------+ | path| TTL| | root.db.**|55555555| @@ -343,8 +353,10 @@ The SHOW ALL TTL example gives the TTL for all path patterns. The SHOW TTL ON pathPattern shows the TTL for the path pattern specified. Display devices' ttl +```sql +show devices; +``` ``` -IoTDB> show devices +---------------+---------+---------+ | Device|IsAligned| TTL| +---------------+---------+---------+ @@ -361,36 +373,36 @@ All devices will definitely have a TTL, meaning it cannot be null. 
INF represent According to the storage model selected before, we can create corresponding timeseries in the two databases respectively. The SQL statements for creating timeseries are as follows: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +```sql +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` From v0.13, you can use a simplified version of the SQL statements to create timeseries: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT +```sql +create timeseries root.ln.wf01.wt01.status BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature FLOAT; +create timeseries root.ln.wf02.wt02.hardware TEXT; +create timeseries root.ln.wf02.wt02.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature FLOAT; ``` When creating a timeseries, the system will automatically assign default encoding and compression methods, requiring no 
manual specification. If your business scenario requires custom adjustments, you may refer to the following example: -```shell -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY +```sql +create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY; ``` Note that if you manually specify an encoding method that is incompatible with the data type, the system will return an error message, as shown below: -``` -IoTDB > create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF +```sql +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF; error: encoding TS_2DIFF does not support BOOLEAN ``` @@ -401,8 +413,8 @@ For a full list of supported data types and corresponding encoding methods, plea The SQL statement for creating a group of timeseries are as follows: -``` -IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) +```sql +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ``` You can set different datatype, encoding, and compression for the timeseries in a group of aligned timeseries @@ -415,11 +427,11 @@ To delete the timeseries we created before, we are able to use `(DELETE | DROP) The usage are as follows: -``` -IoTDB> delete timeseries root.ln.wf01.wt01.status -IoTDB> delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -IoTDB> delete timeseries root.ln.wf02.* -IoTDB> drop timeseries root.ln.wf02.* +```sql +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` ### 2.4 Show Timeseries @@ -440,9 +452,9 @@ Examples: returns all timeseries information matching the given <`PathPattern`>. 
SQL statements are as follows: -``` -IoTDB> show timeseries root.** -IoTDB> show timeseries root.ln.** +```sql +show timeseries root.**; +show timeseries root.ln.**; ``` The results are shown below respectively: @@ -478,7 +490,7 @@ It costs 0.004s returns all the timeseries information start from the offset and limit the number of series returned. For example, -``` +```sql show timeseries root.ln.** limit 10 offset 10 ``` @@ -486,7 +498,7 @@ show timeseries root.ln.** limit 10 offset 10 The query result set is filtered by string fuzzy matching based on the names of the timeseries. For example: -``` +```sql show timeseries root.ln.** where timeseries contains 'wf01.wt' ``` @@ -507,7 +519,7 @@ It costs 0.016s The query result set is filtered by data type. For example: -``` +```sql show timeseries root.ln.** where dataType=FLOAT ``` @@ -532,9 +544,9 @@ It costs 0.016s The query result set is filtered by tags. For example: -``` -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` The query results are as follows: @@ -576,16 +588,16 @@ IoTDB is able to use `COUNT TIMESERIES ` to count the number of timeseries * `LEVEL` could be defined to show count the number of timeseries of each node at the given level in current Metadata Tree. This could be used to query the number of sensors under each device. The grammar is: `COUNT TIMESERIES GROUP BY LEVEL=`. 
-``` -IoTDB > COUNT TIMESERIES root.** -IoTDB > COUNT TIMESERIES root.ln.** -IoTDB > COUNT TIMESERIES root.ln.*.*.status -IoTDB > COUNT TIMESERIES root.ln.wf01.wt01.status -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -IoTDB > COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 +```sql +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc'; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c'; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c'; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; ``` For example, if there are several timeseries (use `show timeseries` to show all timeseries): @@ -612,10 +624,10 @@ Then the Metadata Tree will be as below: As can be seen, `root` is considered as `LEVEL=0`. So when you enter statements such as: -``` -IoTDB > COUNT TIMESERIES root.** GROUP BY LEVEL=1 -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +```sql +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` You will get following results: @@ -657,11 +669,11 @@ By adding WHERE time filter conditions to the existing SHOW/COUNT TIMESERIES, we It is important to note that in metadata queries with time filters, views are not considered; only the time series actually stored in the TsFile are taken into account.
An example usage is as follows: -``` -IoTDB> insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); -IoTDB> insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); -IoTDB> insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); -IoTDB> show timeseries; +```sql +insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); +insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); +insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); +show timeseries; +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ | Timeseries|Alias|Database|DataType|Encoding|Compression|Tags|Attributes|Deadband|DeadbandParameters|ViewType| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ @@ -673,7 +685,7 @@ IoTDB> show timeseries; |root.sg.data2.s2| null| root.sg| FLOAT| GORILLA| LZ4|null| null| null| null| BASE| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ -IoTDB> show timeseries where time >= 15000 and time < 16000; +show timeseries where time >= 15000 and time < 16000; +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ | Timeseries|Alias|Database|DataType|Encoding|Compression|Tags|Attributes|Deadband|DeadbandParameters|ViewType| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ @@ -683,7 +695,7 @@ IoTDB> show timeseries where time >= 15000 and time < 16000; |root.sg.data2.s2| null| root.sg| FLOAT| GORILLA| LZ4|null| null| null| null| BASE| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ -IoTDB> count timeseries where time >= 15000 and time < 16000; +count timeseries where time >= 15000 and time < 16000; +-----------------+ |count(timeseries)| 
+-----------------+ @@ -702,8 +714,8 @@ The differences between tag and attribute are: The SQL statements for creating timeseries with extra tag and attribute information are extended as follows: -``` -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +```sql +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` The `temprature` in the brackets is an alias for the sensor `s1`. So we can use `temprature` to replace `s1` anywhere. @@ -716,31 +728,31 @@ We can update the tag information after creating it as following: * Rename the tag/attribute key -``` +```sql ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 ``` * Reset the tag/attribute value -``` +```sql ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 ``` * Delete the existing tag/attribute -``` +```sql ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 ``` * Add new tags -``` +```sql ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 ``` * Add new attributes -``` +```sql ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 ``` @@ -748,23 +760,23 @@ ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 > add alias or a new key-value if the alias or key doesn't exist, otherwise, update the old one with new value. -``` +```sql ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4) ``` * Show timeseries using tags. Use TAGS(tagKey) to identify the tags used as filter key -``` +```sql SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause ``` returns all the timeseries information that satisfy the where condition and match the pathPattern. 
SQL statements are as follows: -``` -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` The results are shown below respectly: @@ -789,23 +801,23 @@ It costs 0.004s - count timeseries using tags -``` -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +```sql +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL=; ``` returns all the number of timeseries that satisfy the where condition and match the pathPattern. 
SQL statements are as follows: -``` -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +```sql +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` The results are shown below respectly : -``` -IoTDB> count timeseries +```sql +count timeseries; +-----------------+ |count(timeseries)| +-----------------+ @@ -813,7 +825,7 @@ IoTDB> count timeseries +-----------------+ Total line number = 1 It costs 0.019s -IoTDB> count timeseries root.** where TAGS(unit)='c' +count timeseries root.** where TAGS(unit)='c'; +-----------------+ |count(timeseries)| +-----------------+ @@ -821,7 +833,7 @@ IoTDB> count timeseries root.** where TAGS(unit)='c' +-----------------+ Total line number = 1 It costs 0.020s -IoTDB> count timeseries root.** where TAGS(unit)='c' group by level = 2 +count timeseries root.** where TAGS(unit)='c' group by level = 2; +--------------+-----------------+ | column|count(timeseries)| +--------------+-----------------+ @@ -837,14 +849,14 @@ It costs 0.011s create aligned timeseries -``` +```sql create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) ``` The execution result is as follows: -``` -IoTDB> show timeseries +```sql +show timeseries +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -855,8 +867,8 @@ IoTDB> show timeseries Support query: -``` -IoTDB> show timeseries where TAGS(tag1)='v1' +```sql +show timeseries where 
TAGS(tag1)='v1' +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -922,7 +934,7 @@ To make it more convenient and efficient to express multiple time series, IoTDB ### 3.5 Show Child Paths -``` +```sql SHOW CHILD PATHS pathPattern ``` @@ -950,7 +962,7 @@ It costs 0.002s ### 3.6 Show Child Nodes -``` +```sql SHOW CHILD NODES pathPattern ``` @@ -987,11 +999,11 @@ IoTDB is able to use `COUNT NODES LEVEL=` to count the nu This could be used to query the number of devices with specified measurements. The usage are as follows: -``` -IoTDB > COUNT NODES root.** LEVEL=2 -IoTDB > COUNT NODES root.ln.** LEVEL=2 -IoTDB > COUNT NODES root.ln.wf01.** LEVEL=3 -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +```sql +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.** LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` As for the above mentioned example and Metadata tree, you can get following results: @@ -1044,10 +1056,10 @@ Similar to `Show Timeseries`, IoTDB also supports two ways of viewing devices: SQL statement is as follows: -``` -IoTDB> show devices -IoTDB> show devices root.ln.** -IoTDB> show devices root.ln.** where device contains 't' +```sql +show devices; +show devices root.ln.**; +show devices root.ln.** where device contains 't'; ``` You can get results below: @@ -1084,9 +1096,9 @@ To view devices' information with database, we can use `SHOW DEVICES WITH DATABA SQL statement is as follows: -``` -IoTDB> show devices with database -IoTDB> show devices root.ln.** with database +```sql +show devices with database; +show devices root.ln.** with database; ``` You can get 
results below: @@ -1121,10 +1133,10 @@ The above statement is used to count the number of devices. At the same time, it SQL statement is as follows: -``` -IoTDB> show devices -IoTDB> count devices -IoTDB> count devices root.ln.** +```sql +show devices; +count devices; +count devices root.ln.**; ``` You can get results below: @@ -1160,11 +1172,11 @@ It costs 0.004s ### 3.10 Active Device Query Similar to active timeseries query, we can add time filter conditions to device viewing and statistics to query active devices that have data within a certain time range. The definition of active here is the same as for active time series. An example usage is as follows: -``` -IoTDB> insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); -IoTDB> insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); -IoTDB> insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); -IoTDB> show devices; +```sql +insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); +insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); +insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); +show devices; +-------------------+---------+ | devices|isAligned| +-------------------+---------+ @@ -1173,7 +1185,7 @@ IoTDB> show devices; | root.sg.data3| false| +-------------------+---------+ -IoTDB> show devices where time >= 15000 and time < 16000; +show devices where time >= 15000 and time < 16000; +-------------------+---------+ | devices|isAligned| +-------------------+---------+ @@ -1181,7 +1193,7 @@ IoTDB> show devices where time >= 15000 and time < 16000; | root.sg.data2| false| +-------------------+---------+ -IoTDB> count devices where time >= 15000 and time < 16000; +count devices where time >= 15000 and time < 16000; +--------------+ |count(devices)| +--------------+ diff --git a/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md b/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md index f55a597b3..5c70722ff 100644 --- 
a/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md +++ b/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md @@ -374,8 +374,10 @@ which means: Query and return the last data points of timeseries prefixPath.path **Example 1:** get the last point of root.ln.wf01.wt01.status: +```sql +select last status from root.ln.wf01.wt01 +``` ``` -IoTDB> select last status from root.ln.wf01.wt01 +-----------------------------+------------------------+-----+--------+ | Time| timeseries|value|dataType| +-----------------------------+------------------------+-----+--------+ @@ -387,8 +389,10 @@ It costs 0.000s **Example 2:** get the last status and temperature points of root.ln.wf01.wt01, whose timestamp larger or equal to 2017-11-07T23:50:00。 +```sql +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +``` ``` -IoTDB> select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -401,8 +405,10 @@ It costs 0.002s **Example 3:** get the last points of all sensor in root.ln.wf01.wt01, and order the result by the timeseries column in descending order +```sql +select last * from root.ln.wf01.wt01 order by timeseries desc; +``` ``` -IoTDB> select last * from root.ln.wf01.wt01 order by timeseries desc; +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -415,8 +421,10 @@ It costs 0.002s **Example 4:** get the last points of all sensor in root.ln.wf01.wt01, and order the result by the dataType column in descending order +```sql +select last * from root.ln.wf01.wt01 order by dataType desc; +``` ``` -IoTDB> select last * from root.ln.wf01.wt01 order by dataType 
desc; +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -429,8 +437,10 @@ It costs 0.002s **Note:** The requirement to query the latest data point with other filtering conditions can be implemented through function composition. For example: +```sql +select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device +``` ``` -IoTDB> select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device +-----------------+---------------------+----------------+-----------------------+------------------+ | Device|max_time(temperature)|max_time(status)|last_value(temperature)|last_value(status)| +-----------------+---------------------+----------------+-----------------------+------------------+ @@ -454,7 +464,7 @@ The supported operators are as follows: ### 3.1 Time Filter -Use time filters to filter data for a specific time range. For supported formats of timestamps, please refer to [Timestamp](../Background-knowledge/Data-Type_apache.md) . +Use time filters to filter data for a specific time range. For supported formats of timestamps, please refer to [Timestamp](../Background-knowledge/Data-Type.md) . An example is as follows: @@ -547,8 +557,10 @@ In the value filter condition, for TEXT type data, use `Like` and `Regexp` opera **Example 1:** Query data containing `'cc'` in `value` under `root.sg.d1`. +```sql +select * from root.sg.d1 where value like '%cc%' +``` ``` -IoTDB> select * from root.sg.d1 where value like '%cc%' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -561,8 +573,10 @@ It costs 0.002s **Example 2:** Query data that consists of 3 characters and the second character is `'b'` in `value` under `root.sg.d1`. 
+```sql +select * from root.sg.d1 where value like '_b_' +``` ``` -IoTDB> select * from root.sg.device where value like '_b_' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -587,8 +601,10 @@ Beginning with a: ^a.* **Example 1:** Query a string composed of 26 English characters for the value under root.sg.d1 +```sql +select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +``` ``` -IoTDB> select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -601,8 +617,10 @@ It costs 0.002s **Example 2:** Query root.sg.d1 where the value value is a string composed of 26 lowercase English characters and the time is greater than 100 +```sql +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +``` ``` -IoTDB> select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -1607,16 +1625,16 @@ you can use the `HAVING` clause after the `GROUP BY` clause. > The following usages are incorrect: > > ```sql -> select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 -> select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 +> select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; +> select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; > ``` > > 2.When filtering the `GROUP BY LEVEL` result, the PATH in `SELECT` and `HAVING` can only have one node.
> The following usages are incorrect: > > ```sql -> select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 -> select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 +> select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; +> select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; > ``` Here are a few examples of using the 'HAVING' clause to filter aggregate results. @@ -2727,8 +2745,10 @@ For examples: - **Example 1** (aligned by time) -```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +```sql +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +``` +``` +--------------+-------------------+--------+ | source column| target timeseries| written| +--------------+-------------------+--------+ @@ -2757,8 +2777,10 @@ We can see that the writing of the `INTO` clause is very flexible as long as the - **Example 2** (aligned by time) -```shell -IoTDB> select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +```sql +select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +``` +``` +--------------------------------------+-------------------------+--------+ | source column| target timeseries| written| +--------------------------------------+-------------------------+--------+ @@ -2774,8 +2796,10 @@ This statement stores the results of an aggregated query into the specified time - **Example 3** (aligned by device) -```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +```sql +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +``` 
+``` +--------------+--------------+-------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+-------------------+--------+ @@ -2797,8 +2821,10 @@ This statement also writes the query results of the four time series under the ` - **Example 4** (aligned by device) -```shell -IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +```sql +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +``` +``` +--------------+--------------+------------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+------------------------+--------+ @@ -2948,7 +2974,7 @@ This statement specifies that `root.sg_copy.d1` is an unaligned device and `root #### Other points to note - For general aggregation queries, the timestamp is meaningless, and the convention is to use 0 to store. -- When the target time-series exists, the data type of the source column and the target time-series must be compatible. About data type compatibility, see the document [Data Type](../Background-knowledge/Data-Type_apache.md). +- When the target time-series exists, the data type of the source column and the target time-series must be compatible. About data type compatibility, see the document [Data Type](../Background-knowledge/Data-Type.md). - When the target time series does not exist, the system automatically creates it (including the database). - When the queried time series does not exist, or the queried sequence does not have data, the target time series will not be created automatically. @@ -2958,8 +2984,10 @@ This statement specifies that `root.sg_copy.d1` is an unaligned device and `root ETL the original data and write a new time series. 
-```shell -IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEIVCE; +```sql +SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEVICE; +``` +``` +--------------+-------------------+---------------------------+--------+ | source device| source column| target timeseries| written| +--------------+-------------------+---------------------------+--------+ @@ -2977,8 +3005,10 @@ IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) Persistently store the query results, which acts like a materialized view. -```shell -IOTDB > SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +```sql +SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +``` +``` +--------------------------+-----------------------------+--------+ | source column| target timeseries| written| +--------------------------+-----------------------------+--------+ @@ -2996,8 +3026,10 @@ Rewrite non-aligned time series into another aligned time series. **Note:** It is recommended to use the `LIMIT & OFFSET` clause or the `WHERE` clause (time filter) to batch data to prevent excessive data volume in a single operation. 
-```shell -IOTDB > SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +```sql +SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +``` +``` +--------------------------+----------------------+--------+ | source column| target timeseries| written| +--------------------------+----------------------+--------+ diff --git a/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md b/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md index 72e365ab4..a1a6a368c 100644 --- a/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md +++ b/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md @@ -114,7 +114,7 @@ SELECT [LAST] selectExpr [, selectExpr] ... The SQL statement is: ```sql -select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 +select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000; ``` which means: @@ -374,8 +374,10 @@ which means: Query and return the last data points of timeseries prefixPath.path **Example 1:** get the last point of root.ln.wf01.wt01.status: +```sql +select last status from root.ln.wf01.wt01; +``` ``` -IoTDB> select last status from root.ln.wf01.wt01 +-----------------------------+------------------------+-----+--------+ | Time| timeseries|value|dataType| +-----------------------------+------------------------+-----+--------+ @@ -387,8 +389,10 @@ It costs 0.000s **Example 2:** get the last status and temperature points of root.ln.wf01.wt01, whose timestamp larger or equal to 2017-11-07T23:50:00。 +```sql +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; +``` ``` -IoTDB> select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| 
+-----------------------------+-----------------------------+---------+--------+ @@ -401,8 +405,10 @@ It costs 0.002s **Example 3:** get the last points of all sensor in root.ln.wf01.wt01, and order the result by the timeseries column in descending order +```sql +select last * from root.ln.wf01.wt01 order by timeseries desc; +``` ``` -IoTDB> select last * from root.ln.wf01.wt01 order by timeseries desc; +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -415,8 +421,10 @@ It costs 0.002s **Example 4:** get the last points of all sensor in root.ln.wf01.wt01, and order the result by the dataType column in descending order +```sql +select last * from root.ln.wf01.wt01 order by dataType desc; +``` ``` -IoTDB> select last * from root.ln.wf01.wt01 order by dataType desc; +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -428,9 +436,10 @@ It costs 0.002s ``` **Note:** The requirement to query the latest data point with other filtering conditions can be implemented through function composition. 
For example: - +```sql +select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device; +``` ``` -IoTDB> select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device +-----------------+---------------------+----------------+-----------------------+------------------+ | Device|max_time(temperature)|max_time(status)|last_value(temperature)|last_value(status)| +-----------------+---------------------+----------------+-----------------------+------------------+ @@ -454,7 +463,7 @@ The supported operators are as follows: ### 3.1 Time Filter -Use time filters to filter data for a specific time range. For supported formats of timestamps, please refer to [Timestamp](../Background-knowledge/Data-Type_timecho.md) . +Use time filters to filter data for a specific time range. For supported formats of timestamps, please refer to [Timestamp](../Background-knowledge/Data-Type.md) . An example is as follows: @@ -547,8 +556,10 @@ In the value filter condition, for TEXT type data, use `Like` and `Regexp` opera **Example 1:** Query data containing `'cc'` in `value` under `root.sg.d1`. +```sql +select * from root.sg.d1 where value like '%cc%'; +``` ``` -IoTDB> select * from root.sg.d1 where value like '%cc%' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -561,8 +572,10 @@ It costs 0.002s **Example 2:** Query data that consists of 3 characters and the second character is `'b'` in `value` under `root.sg.d1`. 
+```sql +select * from root.sg.device where value like '_b_'; +``` ``` -IoTDB> select * from root.sg.device where value like '_b_' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -587,8 +600,10 @@ Beginning with a: ^a.* **Example 1:** Query a string composed of 26 English characters for the value under root.sg.d1 +```sql +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +``` ``` -IoTDB> select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -601,8 +616,10 @@ It costs 0.002s **Example 2:** Query root.sg.d1 where the value value is a string composed of 26 lowercase English characters and the time is greater than 100 +```sql +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; +``` ``` -IoTDB> select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -1607,16 +1624,16 @@ you can use the `HAVING` clause after the `GROUP BY` clause. > The following usages are incorrect: > > ```sql -> select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 -> select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 +> select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; +> select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; +> ``` > > 2.When filtering the `GROUP BY LEVEL` result, the PATH in `SELECT` and `HAVING` can only have one node. 
> The following usages are incorrect: > > ```sql -> select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 -> select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 +> select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; +> select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; > ``` Here are a few examples of using the 'HAVING' clause to filter aggregate results. @@ -1638,7 +1655,7 @@ Aggregation result 1: Aggregation result filtering query 1: ```sql - select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 1 + select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 1; ``` Filtering result 1: @@ -1675,7 +1692,7 @@ Aggregation result 2: Aggregation result filtering query 2: ```sql - select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device + select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device; ``` Filtering result 2: @@ -1706,7 +1723,7 @@ In IoTDB, users can use the FILL clause to specify the fill mode when data is mi **The following is the syntax definition of the `FILL` clause:** ```sql -FILL '(' PREVIOUS | LINEAR | constant ')' +FILL '(' PREVIOUS | LINEAR | constant ')'; ``` **Note:** @@ -2286,7 +2303,7 @@ It costs 0.005s If the parameter N/SN of LIMIT/SLIMIT clause exceeds the allowable maximum value (N/SN is of type int64), the system prompts errors. 
For example, executing the following SQL statement: ```sql -select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 limit 9223372036854775808 +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 limit 9223372036854775808; ``` The SQL statement will not be executed and the corresponding error prompt is given as follows: @@ -2298,7 +2315,7 @@ Msg: 416: Out of range. LIMIT : N should be Int64. If the parameter N/SN of LIMIT/SLIMIT clause is not a positive intege, the system prompts errors. For example, executing the following SQL statement: ```sql -select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 limit 13.1 +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 limit 13.1; ``` The SQL statement will not be executed and the corresponding error prompt is given as follows: @@ -2310,7 +2327,7 @@ Msg: 401: line 1:129 mismatched input '.' expecting {, ';'} If the parameter OFFSET of LIMIT clause exceeds the size of the result set, IoTDB will return an empty result set. For example, executing the following SQL statement: ```sql -select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 limit 2 offset 6 +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 limit 2 offset 6; ``` The result is shown below: @@ -2327,7 +2344,7 @@ It costs 0.005s If the parameter SOFFSET of SLIMIT clause is not smaller than the number of available timeseries, the system prompts errors. 
For example, executing the following SQL statement: ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 2 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 2; ``` The SQL statement will not be executed and the corresponding error prompt is given as follows: @@ -2443,7 +2460,7 @@ The result below indicates `ORDER BY DEVICE ASC,TIME ASC` is the clause in defau Besides,`ALIGN BY DEVICE` and `ORDER BY` clauses can be used with aggregate query,the SQL statement is: ```sql -select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` The result shows below: @@ -2491,7 +2508,7 @@ Here are several examples of queries for sorting arbitrary expressions using the When you need to sort the results based on the base score score, you can use the following SQL: ```Sql -select score from root.** order by score desc align by device +select score from root.** order by score desc align by device; ``` This will give you the following results: @@ -2543,7 +2560,7 @@ If you want to sort the results based on the total score and, in case of tied sc select base, score, bonus, total from root.** order by total desc NULLS Last, score desc NULLS Last, bonus desc NULLS Last, - time desc align by device + time desc align by device; ``` Here are the results: @@ -2571,7 +2588,7 @@ Here are the results: In the `ORDER BY` clause, you can also use aggregate query expressions. 
For example: ```Sql -select min_value(total) from root.** order by min_value(total) asc align by device +select min_value(total) from root.** order by min_value(total) asc align by device; ``` This will give you the following results: @@ -2591,7 +2608,7 @@ This will give you the following results: When specifying multiple columns in the query, the unsorted columns will change order along with the rows and sorted columns. The order of rows when the sorting columns are the same may vary depending on the specific implementation (no fixed order). For example: ```Sql -select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device +select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device; ``` This will give you the following results: @@ -2612,7 +2629,7 @@ This will give you the following results: You can use both `ORDER BY DEVICE,TIME` and `ORDER BY EXPRESSION` together. For example: ```Sql -select score from root.** order by device asc, score desc, time asc align by device +select score from root.** order by device asc, score desc, time asc align by device; ``` This will give you the following results: @@ -2727,8 +2744,10 @@ For examples: - **Example 1** (aligned by time) -```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +```sql +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +``` +``` +--------------+-------------------+--------+ | source column| target timeseries| written| +--------------+-------------------+--------+ @@ -2757,8 +2776,10 @@ We can see that the writing of the `INTO` clause is very flexible as long as the - **Example 2** (aligned by time) -```shell -IoTDB> select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +```sql +select count(s1 + s2), 
last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +``` +``` +--------------------------------------+-------------------------+--------+ | source column| target timeseries| written| +--------------------------------------+-------------------------+--------+ @@ -2774,8 +2795,10 @@ This statement stores the results of an aggregated query into the specified time - **Example 3** (aligned by device) -```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +```sql +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +``` +``` +--------------+--------------+-------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+-------------------+--------+ @@ -2797,8 +2820,10 @@ This statement also writes the query results of the four time series under the ` - **Example 4** (aligned by device) -```shell -IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +```sql +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +``` +``` +--------------+--------------+------------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+------------------------+--------+ @@ -2948,7 +2973,7 @@ This statement specifies that `root.sg_copy.d1` is an unaligned device and `root #### Other points to note - For general aggregation queries, the timestamp is meaningless, and the convention is to use 0 to store. -- When the target time-series exists, the data type of the source column and the target time-series must be compatible. About data type compatibility, see the document [Data Type](../Background-knowledge/Data-Type_timecho.md). 
+- When the target time-series exists, the data type of the source column and the target time-series must be compatible. About data type compatibility, see the document [Data Type](../Background-knowledge/Data-Type.md). - When the target time series does not exist, the system automatically creates it (including the database). - When the queried time series does not exist, or the queried sequence does not have data, the target time series will not be created automatically. @@ -2958,8 +2983,10 @@ This statement specifies that `root.sg_copy.d1` is an unaligned device and `root ETL the original data and write a new time series. -```shell -IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEIVCE; +```sql +SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEVICE; +``` +``` +--------------+-------------------+---------------------------+--------+ | source device| source column| target timeseries| written| +--------------+-------------------+---------------------------+--------+ @@ -2977,8 +3004,10 @@ IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) Persistently store the query results, which acts like a materialized view. -```shell -IOTDB > SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +```sql +SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +``` +``` +--------------------------+-----------------------------+--------+ | source column| target timeseries| written| +--------------------------+-----------------------------+--------+ @@ -2996,8 +3025,10 @@ Rewrite non-aligned time series into another aligned time series. **Note:** It is recommended to use the `LIMIT & OFFSET` clause or the `WHERE` clause (time filter) to batch data to prevent excessive data volume in a single operation. 
-```shell -IOTDB > SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +```sql +SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +``` +``` +--------------------------+----------------------+--------+ | source column| target timeseries| written| +--------------------------+----------------------+--------+ diff --git a/src/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_apache.md b/src/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_apache.md index 2b4603537..7806b1a60 100644 --- a/src/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_apache.md +++ b/src/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_apache.md @@ -28,33 +28,33 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad ### 1.1 Create Database ```sql -IoTDB > create database root.ln -IoTDB > create database root.sgcc +create database root.ln; +create database root.sgcc; ``` ### 1.2 Show Databases ```sql -IoTDB> SHOW DATABASES -IoTDB> SHOW DATABASES root.** +SHOW DATABASES; +SHOW DATABASES root.**; ``` ### 1.3 Delete Database ```sql -IoTDB > DELETE DATABASE root.ln -IoTDB > DELETE DATABASE root.sgcc -// delete all data, all timeseries and all databases -IoTDB > DELETE DATABASE root.** +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +// delete all data, all timeseries and all databases; +DELETE DATABASE root.**; ``` ### 1.4 Count Databases ```sql -IoTDB> count databases -IoTDB> count databases root.* -IoTDB> count databases root.sgcc.* -IoTDB> count databases root.sgcc +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` ### 1.5 Setting up heterogeneous databases (Advanced operations) @@ -74,7 +74,7 @@ ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; #### Show heterogeneous databases ```sql -SHOW DATABASES DETAILS +SHOW DATABASES DETAILS; ``` ### 1.6 TTL @@ -82,25 +82,25 @@ SHOW 
DATABASES DETAILS #### Set TTL ```sql -IoTDB> set ttl to root.ln 3600000 -IoTDB> set ttl to root.sgcc.** 3600000 -IoTDB> set ttl to root.** 3600000 +set ttl to root.ln 3600000; +set ttl to root.sgcc.** 3600000; +set ttl to root.** 3600000; ``` #### Unset TTL ```sql -IoTDB> unset ttl from root.ln -IoTDB> unset ttl from root.sgcc.** -IoTDB> unset ttl from root.** +unset ttl from root.ln; +unset ttl from root.sgcc.**; +unset ttl from root.**; ``` #### Show TTL ```sql -IoTDB> SHOW ALL TTL -IoTDB> SHOW TTL ON StorageGroupNames -IoTDB> SHOW DEVICES +SHOW ALL TTL; +SHOW TTL ON StorageGroupNames; +SHOW DEVICES; ``` ## 2. DEVICE TEMPLATE @@ -120,13 +120,13 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad **Example 1:** Create a template containing two non-aligned timeseires ```sql -IoTDB> create device template t1 (temperature FLOAT, status BOOLEAN) +create device template t1 (temperature FLOAT, status BOOLEAN); ``` **Example 2:** Create a template containing a group of aligned timeseires ```sql -IoTDB> create device template t2 aligned (lat FLOAT, lon FLOAT) +create device template t2 aligned (lat FLOAT, lon FLOAT); ``` The` lat` and `lon` measurements are aligned. @@ -134,52 +134,52 @@ The` lat` and `lon` measurements are aligned. 
### 2.2 Set Device Template ```sql -IoTDB> set device template t1 to root.sg1.d1 +set device template t1 to root.sg1.d1; ``` ### 2.3 Activate Device Template ```sql -IoTDB> set device template t1 to root.sg1.d1 -IoTDB> set device template t2 to root.sg1.d2 -IoTDB> create timeseries using device template on root.sg1.d1 -IoTDB> create timeseries using device template on root.sg1.d2 +set device template t1 to root.sg1.d1; +set device template t2 to root.sg1.d2; +create timeseries using device template on root.sg1.d1; +create timeseries using device template on root.sg1.d2; ``` ### 2.4 Show Device Template ```sql -IoTDB> show device templates -IoTDB> show nodes in device template t1 -IoTDB> show paths set device template t1 -IoTDB> show paths using device template t1 +show device templates; +show nodes in device template t1; +show paths set device template t1; +show paths using device template t1; ``` ### 2.5 Deactivate Device Template ```sql -IoTDB> delete timeseries of device template t1 from root.sg1.d1 -IoTDB> deactivate device template t1 from root.sg1.d1 -IoTDB> delete timeseries of device template t1 from root.sg1.*, root.sg2.* -IoTDB> deactivate device template t1 from root.sg1.*, root.sg2.* +delete timeseries of device template t1 from root.sg1.d1; +deactivate device template t1 from root.sg1.d1; +delete timeseries of device template t1 from root.sg1.*, root.sg2.*; +deactivate device template t1 from root.sg1.*, root.sg2.*; ``` ### 2.6 Unset Device Template ```sql -IoTDB> unset device template t1 from root.sg1.d1 +unset device template t1 from root.sg1.d1; ``` ### 2.7 Drop Device Template ```sql -IoTDB> drop device template t1 +drop device template t1; ``` ### 2.8 Alter Device Template ```sql -IoTDB> alter device template t1 add (speed FLOAT) +alter device template t1 add (speed FLOAT); ``` ## 3. 
TIMESERIES MANAGEMENT @@ -189,108 +189,108 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad ### 3.1 Create Timeseries ```sql -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` - From v0.13, you can use a simplified version of the SQL statements to create timeseries: ```sql -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` - 
Notice that when in the CREATE TIMESERIES statement the encoding method conflicts with the data type, the system gives the corresponding error prompt as shown below: ```sql -IoTDB > create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN -error: encoding TS_2DIFF does not support BOOLEAN +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN; +error: encoding TS_2DIFF does not support BOOLEAN; ``` ### 3.2 Create Aligned Timeseries ```sql -IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT , longitude FLOAT) +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT , longitude FLOAT); ``` ### 3.3 Delete Timeseries ```sql -IoTDB> delete timeseries root.ln.wf01.wt01.status -IoTDB> delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -IoTDB> delete timeseries root.ln.wf02.* -IoTDB> drop timeseries root.ln.wf02.* +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` ### 3.4 Show Timeseries ```sql -IoTDB> show timeseries root.** -IoTDB> show timeseries root.ln.** -IoTDB> show timeseries root.ln.** limit 10 offset 10 -IoTDB> show timeseries root.ln.** where timeseries contains 'wf01.wt' -IoTDB> show timeseries root.ln.** where dataType=FLOAT +show timeseries root.**; +show timeseries root.ln.**; +show timeseries root.ln.** limit 10 offset 10; +show timeseries root.ln.** where timeseries contains 'wf01.wt'; +show timeseries root.ln.** where dataType=FLOAT; ``` ### 3.5 Count Timeseries ```sql -IoTDB > COUNT TIMESERIES root.** -IoTDB > COUNT TIMESERIES root.ln.** -IoTDB > COUNT TIMESERIES root.ln.*.*.status -IoTDB > COUNT TIMESERIES root.ln.wf01.wt01.status -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -IoTDB > COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -IoTDB > COUNT TIMESERIES root.** 
WHERE TAGS(unit) = 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 -IoTDB > COUNT TIMESERIES root.** GROUP BY LEVEL=1 -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' ; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' ; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' ; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` ### 3.6 Tag and Attribute Management ```sql -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` * Rename the tag/attribute key ```SQL -ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1; ``` * Reset the tag/attribute value ```SQL -ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1; ``` * Delete the existing tag/attribute ```SQL -ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2; ``` * Add new tags ```SQL -ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4; ``` * Add new attributes ```SQL -ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4; ``` * Upsert alias, tags 
and attributes @@ -298,49 +298,51 @@ ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 > add alias or a new key-value if the alias or key doesn't exist, otherwise, update the old one with new value. ```SQL -ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4) +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4); ``` * Show timeseries using tags. Use TAGS(tagKey) to identify the tags used as filter key ```SQL -SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause +SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; ``` returns all the timeseries information that satisfy the where condition and match the pathPattern. SQL statements are as follows: ```SQL -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` - count timeseries using tags ```SQL -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL=; ``` returns all the number of timeseries that satisfy the where condition and match the pathPattern. 
SQL statements are as follows: ```SQL -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` create aligned timeseries ```SQL -create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) +create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)); ``` The execution result is as follows: ```SQL -IoTDB> show timeseries +show timeseries; +``` +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -352,7 +354,9 @@ IoTDB> show timeseries Support query: ```SQL -IoTDB> show timeseries where TAGS(tag1)='v1' +show timeseries where TAGS(tag1)='v1'; +``` +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -369,40 +373,40 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad ### 4.1 Show Child Paths ```SQL -SHOW CHILD PATHS pathPattern +SHOW CHILD PATHS pathPattern; ``` ### 4.2 Show Child Nodes ```SQL -SHOW 
CHILD NODES pathPattern +SHOW CHILD NODES pathPattern; ``` ### 4.3 Count Nodes ```SQL -IoTDB > COUNT NODES root.** LEVEL=2 -IoTDB > COUNT NODES root.ln.** LEVEL=2 -IoTDB > COUNT NODES root.ln.wf01.** LEVEL=3 -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.** LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` ### 4.4 Show Devices ```SQL -IoTDB> show devices -IoTDB> show devices root.ln.** -IoTDB> show devices root.ln.** where device contains 't' -IoTDB> show devices with database -IoTDB> show devices root.ln.** with database +show devices; +show devices root.ln.**; +show devices root.ln.** where device contains 't'; +show devices with database; +show devices root.ln.** with database; ``` ### 4.5 Count Devices ```SQL -IoTDB> show devices -IoTDB> count devices -IoTDB> count devices root.ln.** +show devices; +count devices; +count devices root.ln.**; ``` ## 5. INSERT & LOAD DATA @@ -416,30 +420,30 @@ For more details, see document [Write-Data](../Basic-Concept/Write-Data_apache). 
- Insert Single Timeseries ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp,status) values(1,true) -IoTDB > insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1') +insert into root.ln.wf02.wt02(timestamp,status) values(1,true); +insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1'); ``` - Insert Multiple Timeseries ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (2, false, 'v2') -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4') +insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (2, false, 'v2'); +insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4'); ``` - Use the Current System Timestamp as the Timestamp of the Data Point ```SQL -IoTDB > insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2') +insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2'); ``` #### Insert Data Into Aligned Timeseries ```SQL -IoTDB > create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE) -IoTDB > insert into root.sg1.d1(time, s1, s2) aligned values(1, 1, 1) -IoTDB > insert into root.sg1.d1(time, s1, s2) aligned values(2, 2, 2), (3, 3, 3) -IoTDB > select * from root.sg1.d1 +create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE); +insert into root.sg1.d1(time, s1, s2) aligned values(1, 1, 1); +insert into root.sg1.d1(time, s1, s2) aligned values(2, 2, 2), (3, 3, 3); +select * from root.sg1.d1; ``` ### 5.2 Load External TsFile Tool @@ -476,34 +480,34 @@ For more details, see document [Write-Delete-Data](../Basic-Concept/Write-Data_a ### 6.1 Delete Single Timeseries ```sql -IoTDB > delete from root.ln.wf02.wt02.status where time<=2017-11-01T16:26:00; -IoTDB > delete from root.ln.wf02.wt02.status where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -IoTDB > delete from root.ln.wf02.wt02.status where time < 10 -IoTDB > delete from root.ln.wf02.wt02.status where time <= 10 
-IoTDB > delete from root.ln.wf02.wt02.status where time < 20 and time > 10 -IoTDB > delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10 -IoTDB > delete from root.ln.wf02.wt02.status where time > 20 -IoTDB > delete from root.ln.wf02.wt02.status where time >= 20 -IoTDB > delete from root.ln.wf02.wt02.status where time = 20 -IoTDB > delete from root.ln.wf02.wt02.status where time > 4 or time < 0 -Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic -expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' -IoTDB > delete from root.ln.wf02.wt02.status +delete from root.ln.wf02.wt02.status where time<=2017-11-01T16:26:00; +delete from root.ln.wf02.wt02.status where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; +delete from root.ln.wf02.wt02.status where time < 10; +delete from root.ln.wf02.wt02.status where time <= 10; +delete from root.ln.wf02.wt02.status where time < 20 and time > 10; +delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10; +delete from root.ln.wf02.wt02.status where time > 20; +delete from root.ln.wf02.wt02.status where time >= 20; +delete from root.ln.wf02.wt02.status where time = 20; +delete from root.ln.wf02.wt02.status where time > 4 or time < 0; +Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic +expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' +delete from root.ln.wf02.wt02.status; ``` ### 6.2 Delete Multiple Timeseries ```sql -IoTDB > delete from root.ln.wf02.wt02 where time <= 2017-11-01T16:26:00; -IoTDB > delete from root.ln.wf02.wt02.* where time <= 2017-11-01T16:26:00; -IoTDB> delete from root.ln.wf03.wt02.status where time < now() +delete from root.ln.wf02.wt02 where time <= 2017-11-01T16:26:00; +delete from root.ln.wf02.wt02.* where time <= 2017-11-01T16:26:00; +delete from root.ln.wf03.wt02.status where time < now(); Msg: The statement is 
executed successfully. ``` ### 6.3 Delete Time Partition (experimental) ```sql -IoTDB > DELETE PARTITION root.ln 0,1,2 +DELETE PARTITION root.ln 0,1,2; ``` ## 7. QUERY DATA @@ -537,31 +541,31 @@ SELECT [LAST] selectExpr [, selectExpr] ... #### Select a Column of Data Based on a Time Interval ```sql -IoTDB > select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 +select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000; ``` #### Select Multiple Columns of Data Based on a Time Interval ```sql -IoTDB > select status, temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; +select status, temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; ``` #### Select Multiple Columns of Data for the Same Device According to Multiple Time Intervals ```sql -IoTDB > select status,temperature from root.ln.wf01.wt01 where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); +select status,temperature from root.ln.wf01.wt01 where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); ``` #### Choose Multiple Columns of Data for Different Devices According to Multiple Time Intervals ```sql -IoTDB > select wf01.wt01.status,wf02.wt02.hardware from root.ln where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); +select wf01.wt01.status,wf02.wt02.hardware from root.ln where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); ``` #### Order By Time Query ```sql -IoTDB > select * from root.ln.** where time > 1 order by time desc limit 10; +select * from root.ln.** where time > 1 order by 
time desc limit 10; ``` ### 7.2 `SELECT` CLAUSE @@ -569,7 +573,7 @@ IoTDB > select * from root.ln.** where time > 1 order by time desc limit 10; #### Use Alias ```sql -IoTDB > select s1 as temperature, s2 as speed from root.ln.wf01.wt01; +select s1 as temperature, s2 as speed from root.ln.wf01.wt01; ``` #### Nested Expressions @@ -577,35 +581,35 @@ IoTDB > select s1 as temperature, s2 as speed from root.ln.wf01.wt01; ##### Nested Expressions with Time Series Query ```sql -IoTDB > select a, +select a, b, ((a + 1) * 2 - 1) % 2 + 1.5, sin(a + sin(a + sin(b))), -(a + b) * (sin(a + b) * sin(a + b) + cos(a + b) * cos(a + b)) + 1 from root.sg1; -IoTDB > select (a + b) * 2 + sin(a) from root.sg +select (a + b) * 2 + sin(a) from root.sg; -IoTDB > select (a + *) / 2 from root.sg1 +select (a + *) / 2 from root.sg1; -IoTDB > select (a + b) * 3 from root.sg, root.ln +select (a + b) * 3 from root.sg, root.ln; ``` ##### Nested Expressions query with aggregations ```sql -IoTDB > select avg(temperature), +select avg(temperature), sin(avg(temperature)), avg(temperature) + 1, -sum(hardware), avg(temperature) + sum(hardware) from root.ln.wf01.wt01; -IoTDB > select avg(*), +select avg(*), (avg(*) + 1) * 3 / 2 -1 -from root.sg1 +from root.sg1; -IoTDB > select avg(temperature), +select avg(temperature), sin(avg(temperature)), avg(temperature) + 1, -sum(hardware), @@ -617,10 +621,10 @@ GROUP BY([10, 90), 10ms); #### Last Query ```sql -IoTDB > select last status from root.ln.wf01.wt01 -IoTDB > select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 -IoTDB > select last * from root.ln.wf01.wt01 order by timeseries desc; -IoTDB > select last * from root.ln.wf01.wt01 order by dataType desc; +select last status from root.ln.wf01.wt01; +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; +select last * from root.ln.wf01.wt01 order by timeseries desc; +select last * from root.ln.wf01.wt01 order by dataType desc; ``` ### 7.3 
`WHERE` CLAUSE @@ -628,22 +632,22 @@ IoTDB > select last * from root.ln.wf01.wt01 order by dataType desc; #### Time Filter ```sql -IoTDB > select s1 from root.sg1.d1 where time > 2022-01-01T00:05:00.000; -IoTDB > select s1 from root.sg1.d1 where time = 2022-01-01T00:05:00.000; -IoTDB > select s1 from root.sg1.d1 where time >= 2022-01-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; +select s1 from root.sg1.d1 where time > 2022-01-01T00:05:00.000; +select s1 from root.sg1.d1 where time = 2022-01-01T00:05:00.000; +select s1 from root.sg1.d1 where time >= 2022-01-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; ``` #### Value Filter ```sql -IoTDB > select temperature from root.sg1.d1 where temperature > 36.5; -IoTDB > select status from root.sg1.d1 where status = true; -IoTDB > select temperature from root.sg1.d1 where temperature between 36.5 and 40; -IoTDB > select temperature from root.sg1.d1 where temperature not between 36.5 and 40; -IoTDB > select code from root.sg1.d1 where code in ('200', '300', '400', '500'); -IoTDB > select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); -IoTDB > select code from root.sg1.d1 where temperature is null; -IoTDB > select code from root.sg1.d1 where temperature is not null; +select temperature from root.sg1.d1 where temperature > 36.5; +select status from root.sg1.d1 where status = true; +select temperature from root.sg1.d1 where temperature between 36.5 and 40; +select temperature from root.sg1.d1 where temperature not between 36.5 and 40; +select code from root.sg1.d1 where code in ('200', '300', '400', '500'); +select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); +select code from root.sg1.d1 where temperature is null; +select code from root.sg1.d1 where temperature is not null; ``` #### Fuzzy Query @@ -651,15 +655,15 @@ IoTDB > select code from root.sg1.d1 where temperature is not null; - Fuzzy matching using `Like` ```sql -IoTDB > select * from root.sg.d1 where value like 
'%cc%' -IoTDB > select * from root.sg.device where value like '_b_' +select * from root.sg.d1 where value like '%cc%'; +select * from root.sg.device where value like '_b_'; ``` - Fuzzy matching using `Regexp` ```sql -IoTDB > select * from root.sg.d1 where value regexp '^[A-Za-z]+$' -IoTDB > select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; ``` ### 7.4 `GROUP BY` CLAUSE @@ -667,91 +671,91 @@ IoTDB > select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 - Aggregate By Time without Specifying the Sliding Step Length ```sql -IoTDB > select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d); +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d); ``` - Aggregate By Time Specifying the Sliding Step Length ```sql -IoTDB > select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d); +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d); ``` - Aggregate by Natural Month ```sql -IoTDB > select count(status) from root.ln.wf01.wt01 group by([2017-11-01T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); -IoTDB > select count(status) from root.ln.wf01.wt01 group by([2017-10-31T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); +select count(status) from root.ln.wf01.wt01 group by([2017-11-01T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); +select count(status) from root.ln.wf01.wt01 group by([2017-10-31T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); ``` - Left Open And Right Close Range ```sql -IoTDB > select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d); +select count(status) from root.ln.wf01.wt01 group by 
((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d); ``` - Aggregation By Variation ```sql -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6) -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false) -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4) -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6); +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false); +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4); +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10); ``` - Aggregation By Condition ```sql -IoTDB > select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=true) -IoTDB > select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=false) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=true); +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=false); ``` - Aggregation By Session ```sql -IoTDB > select __endTime,count(*) from root.** group by session(1d) -IoTDB > select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device +select __endTime,count(*) from root.** group by session(1d); +select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device; ``` - Aggregation By Count ```sql -IoTDB > select count(charging_stauts), 
first_value(soc) from root.sg group by count(charging_status,5) -IoTDB > select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false) +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5); +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false); ``` - Aggregation By Level ```sql -IoTDB > select count(status) from root.** group by level = 1 -IoTDB > select count(status) from root.** group by level = 3 -IoTDB > select count(status) from root.** group by level = 1, 3 -IoTDB > select max_value(temperature) from root.** group by level = 0 -IoTDB > select count(*) from root.ln.** group by level = 2 +select count(status) from root.** group by level = 1; +select count(status) from root.** group by level = 3; +select count(status) from root.** group by level = 1, 3; +select max_value(temperature) from root.** group by level = 0; +select count(*) from root.ln.** group by level = 2; ``` - Aggregate By Time with Level Clause ```sql -IoTDB > select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d), level=1; -IoTDB > select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d), level=1; +select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d), level=1; +select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d), level=1; ``` - Aggregation query by one single tag ```sql -IoTDB > SELECT AVG(temperature) FROM root.factory1.** GROUP BY TAGS(city); +SELECT AVG(temperature) FROM root.factory1.** GROUP BY TAGS(city); ``` - Aggregation query by multiple tags ```sql -IoTDB > SELECT avg(temperature) FROM root.factory1.** GROUP BY TAGS(city, workshop); +SELECT avg(temperature) FROM root.factory1.** GROUP BY TAGS(city, workshop); ``` - Downsampling Aggregation by 
tags based on Time Window ```sql -IoTDB > SELECT avg(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS(city, workshop); +SELECT avg(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS(city, workshop); ``` ### 7.5 `HAVING` CLAUSE @@ -759,17 +763,17 @@ IoTDB > SELECT avg(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5 Correct: ```sql -IoTDB > select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 1 -IoTDB > select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device +select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 1; +select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device; ``` Incorrect: ```sql -IoTDB > select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 -IoTDB > select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 -IoTDB > select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 -IoTDB > select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 +select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; +select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; +select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; +select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; ``` ### 7.6 `FILL` CLAUSE @@ -777,7 +781,7 @@ IoTDB > select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having su #### `PREVIOUS` Fill ```sql -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(previous); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(previous); ``` #### `PREVIOUS` FILL and specify the fill timeout threshold @@ -788,14 +792,14 @@ select temperature, status 
from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: #### `LINEAR` Fill ```sql -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(linear); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(linear); ``` #### Constant Fill ```sql -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(2.0); -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(true); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(2.0); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(true); ``` ### 7.7 `LIMIT` and `SLIMIT` CLAUSES (PAGINATION) @@ -803,24 +807,24 @@ IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-1 #### Row Control over Query Results ```sql -IoTDB > select status, temperature from root.ln.wf01.wt01 limit 10 -IoTDB > select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 -IoTDB > select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 2 offset 3 -IoTDB > select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 5 offset 3 +select status, temperature from root.ln.wf01.wt01 limit 10; +select status, temperature from root.ln.wf01.wt01 limit 5 offset 3; +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 2 offset 3; +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 
2017-11-07T23:00:00),1d) limit 5 offset 3; ``` #### Column Control over Query Results ```sql -IoTDB > select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 -IoTDB > select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1 -IoTDB > select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1; +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1; +select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1; ``` #### Row and Column Control over Query Results ```sql -IoTDB > select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 +select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0; ``` ### 7.8 `ORDER BY` CLAUSE @@ -828,31 +832,31 @@ IoTDB > select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 #### Order by in ALIGN BY TIME mode ```sql -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time desc; +select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time desc; ``` #### Order by in ALIGN BY DEVICE mode ```sql -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc,time asc align by device; -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,device desc align by device; -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; -IoTDB > select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device 
desc,time asc align by device; +select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,device desc align by device; +select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; +select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` #### Order by arbitrary expressions ```sql -IoTDB > select score from root.** order by score desc align by device -IoTDB > select score,total from root.one order by base+score+bonus desc -IoTDB > select score,total from root.one order by total desc -IoTDB > select base, score, bonus, total from root.** order by total desc NULLS Last, +select score from root.** order by score desc align by device; +select score,total from root.one order by base+score+bonus desc; +select score,total from root.one order by total desc; +select base, score, bonus, total from root.** order by total desc NULLS Last, score desc NULLS Last, bonus desc NULLS Last, - time desc align by device -IoTDB > select min_value(total) from root.** order by min_value(total) asc align by device -IoTDB > select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device -IoTDB > select score from root.** order by device asc, score desc, time asc align by device + time desc align by device; +select min_value(total) from root.** order by min_value(total) asc align by device; +select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device; +select score from root.** order by device asc, score desc, time asc align by device; ``` ### 7.9 `ALIGN BY` CLAUSE @@ -860,54 +864,54 @@ IoTDB > select score from root.** order by device asc, score desc, time asc alig #### Align by Device ```sql -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; +select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` ### 7.10 `INTO` CLAUSE 
(QUERY WRITE-BACK) ```sql -IoTDB > select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; -IoTDB > select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); -IoTDB > select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; -IoTDB > select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; ``` - Using variable placeholders: ```sql -IoTDB > select s1, s2 +select s1, s2 into root.sg_copy.d1(::), root.sg_copy.d2(s1), root.sg_copy.d1(${3}), root.sg_copy.d2(::) from root.sg.d1, root.sg.d2; -IoTDB > select d1.s1, d1.s2, d2.s3, d3.s4 +select d1.s1, d1.s2, d2.s3, d3.s4 into ::(s1_1, s2_2), root.sg.d2_2(s3_3), root.${2}_copy.::(s4) from root.sg; -IoTDB > select * into root.sg_bk.::(::) from root.sg.**; +select * into root.sg_bk.::(::) from root.sg.**; -IoTDB > select s1, s2, s3, s4 +select s1, s2, s3, s4 into root.backup_sg.d1(s1, s2, s3, s4), root.backup_sg.d2(::), root.sg.d3(backup_${4}) from root.sg.d1, root.sg.d2, root.sg.d3 align by device; -IoTDB > select avg(s1), sum(s2) + sum(s3), count(s4) +select avg(s1), sum(s2) + sum(s3), count(s4) into root.agg_${2}.::(avg_s1, sum_s2_add_s3, count_s4) from root.** align by device; -IoTDB > select * into ::(backup_${4}) from root.sg.** align by device; +select * into ::(backup_${4}) from root.sg.** align by 
device; -IoTDB > select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; ``` ## 8. Maintennance Generate the corresponding query plan: ```sql -explain select s1,s2 from root.sg.d1 +explain select s1,s2 from root.sg.d1; ``` Execute the corresponding SQL, analyze the execution and output: ```sql -explain analyze select s1,s2 from root.sg.d1 order by s1 +explain analyze select s1,s2 from root.sg.d1 order by s1; ``` ## 9. OPERATOR @@ -918,7 +922,7 @@ For more details, see document [Operator-and-Expression](./Operator-and-Expressi For details and examples, see the document [Arithmetic Operators and Functions](./Operator-and-Expression.md#_1-1-arithmetic-operators). ```sql -select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1 +select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1; ``` ### 9.2 Comparison Operators @@ -934,12 +938,12 @@ select temperature from root.sg1.d1 where temperature between 36.5 and 40; select temperature from root.sg1.d1 where temperature not between 36.5 and 40; # Fuzzy matching operator: Use `Like` for fuzzy matching -select * from root.sg.d1 where value like '%cc%' -select * from root.sg.device where value like '_b_' +select * from root.sg.d1 where value like '%cc%'; +select * from root.sg.device where value like '_b_'; # Fuzzy matching operator: Use `Regexp` for fuzzy matching -select * from root.sg.d1 where value regexp '^[A-Za-z]+$' -select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; select b, b like '1%', b regexp '[0-2]' from root.test; # `IS NULL` operator @@ -1002,25 +1006,25 @@ For details and examples, see the document [String 
Processing](./Operator-and-Ex ```sql select s1, string_contains(s1, 's'='warn') from root.sg1.d4; select s1, string_matches(s1, 'regex'='[^\\s]+37229') from root.sg1.d4; -select s1, length(s1) from root.sg1.d1 -select s1, locate(s1, "target"="1") from root.sg1.d1 -select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1 -select s1, startswith(s1, "target"="1") from root.sg1.d1 -select s1, endswith(s1, "target"="1") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", "series_behind"="true") from root.sg1.d1 -select s1, substring(s1 from 1 for 2) from root.sg1.d1 -select s1, replace(s1, 'es', 'tt') from root.sg1.d1 -select s1, upper(s1) from root.sg1.d1 -select s1, lower(s1) from root.sg1.d1 -select s3, trim(s3) from root.sg1.d1 -select s1, s2, strcmp(s1, s2) from root.sg1.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1 -select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1 -select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 +select s1, length(s1) from root.sg1.d1; +select s1, locate(s1, "target"="1") from root.sg1.d1; +select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1; +select s1, startswith(s1, "target"="1") from root.sg1.d1; +select s1, endswith(s1, "target"="1") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", "series_behind"="true") from root.sg1.d1; +select s1, substring(s1 from 1 for 2) from root.sg1.d1; +select s1, replace(s1, 'es', 
'tt') from root.sg1.d1; +select s1, upper(s1) from root.sg1.d1; +select s1, lower(s1) from root.sg1.d1; +select s3, trim(s3) from root.sg1.d1; +select s1, s2, strcmp(s1, s2) from root.sg1.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1; +select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1; +select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1; +select regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1; +select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1; ``` ### 10.5 Data Type Conversion Function @@ -1028,7 +1032,7 @@ select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 For details and examples, see the document [Data Type Conversion Function](./Operator-and-Expression.md#_2-5-data-type-conversion-function). ```sql -SELECT cast(s1 as INT32) from root.sg +SELECT cast(s1 as INT32) from root.sg; ``` ### 10.6 Constant Timeseries Generating Functions @@ -1076,8 +1080,8 @@ select equal_size_bucket_agg_sample(temperature, 'type'='avg','proportion'='0.1' select equal_size_bucket_m4_sample(temperature, 'proportion'='0.1') as M4_sample from root.ln.wf01.wt01; select equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='avg', 'number'='2') as outlier_avg_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='stendis', 'number'='2') as outlier_stendis_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='cos', 'number'='2') as outlier_cos_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='prenextdis', 'number'='2') as outlier_prenextdis_sample from root.ln.wf01.wt01; -select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1 -select 
M4(s1,'windowSize'='10') from root.vehicle.d1 +select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1; +select M4(s1,'windowSize'='10') from root.vehicle.d1; ``` ### 10.11 Change Points Function @@ -1085,7 +1089,7 @@ select M4(s1,'windowSize'='10') from root.vehicle.d1 For details and examples, see the document [Time-Series](./Operator-and-Expression.md#_2-11-change-points-function). ```sql -select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1 +select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1; ``` ## 11. DATA QUALITY FUNCTION LIBRARY @@ -1098,23 +1102,23 @@ For details and examples, see the document [Data-Quality](../SQL-Manual/UDF-Libr ```sql # Completeness -select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; # Consistency -select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; # Timeliness -select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; # Validity -select Validity(s1) from root.test.d1 where 
time <= 2020-01-01 00:00:30 -select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; # Accuracy -select Accuracy(t1,t2,t3,m1,m2,m3) from root.test +select Accuracy(t1,t2,t3,m1,m2,m3) from root.test; ``` ### 11.2 Data Profiling @@ -1123,78 +1127,78 @@ For details and examples, see the document [Data-Profiling](../SQL-Manual/UDF-Li ```sql # ACF -select acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05 +select acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05; # Distinct -select distinct(s2) from root.test.d2 +select distinct(s2) from root.test.d2; # Histogram -select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1 +select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1; # Integral -select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 -select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10 +select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; +select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10; # IntegralAvg -select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 +select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; # Mad -select mad(s0) from root.test -select mad(s0, "error"="0.01") from root.test +select mad(s0) from root.test; +select mad(s0, "error"="0.01") from root.test; # Median -select median(s0, "error"="0.01") from root.test +select median(s0, "error"="0.01") from root.test; # MinMax -select minmax(s1) from root.test +select minmax(s1) from root.test; # Mode -select mode(s2) from root.test.d2 +select mode(s2) from root.test.d2; # MvAvg -select mvavg(s1, "window"="3") from root.test +select mvavg(s1, "window"="3") from root.test; # PACF -select pacf(s1, "lag"="5") from 
root.test +select pacf(s1, "lag"="5") from root.test; # Percentile -select percentile(s0, "rank"="0.2", "error"="0.01") from root.test +select percentile(s0, "rank"="0.2", "error"="0.01") from root.test; # Quantile -select quantile(s0, "rank"="0.2", "K"="800") from root.test +select quantile(s0, "rank"="0.2", "K"="800") from root.test; # Period -select period(s1) from root.test.d3 +select period(s1) from root.test.d3; # QLB -select QLB(s1) from root.test.d1 +select QLB(s1) from root.test.d1; # Resample -select resample(s1,'every'='5m','interp'='linear') from root.test.d1 -select resample(s1,'every'='30m','aggr'='first') from root.test.d1 -select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1 +select resample(s1,'every'='5m','interp'='linear') from root.test.d1; +select resample(s1,'every'='30m','aggr'='first') from root.test.d1; +select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1; # Sample -select sample(s1,'method'='reservoir','k'='5') from root.test.d1 -select sample(s1,'method'='isometric','k'='5') from root.test.d1 +select sample(s1,'method'='reservoir','k'='5') from root.test.d1; +select sample(s1,'method'='isometric','k'='5') from root.test.d1; # Segment -select segment(s1, "error"="0.1") from root.test +select segment(s1, "error"="0.1") from root.test; # Skew -select skew(s1) from root.test.d1 +select skew(s1) from root.test.d1; # Spline -select spline(s1, "points"="151") from root.test +select spline(s1, "points"="151") from root.test; # Spread -select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 +select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; # Stddev -select stddev(s1) from root.test.d1 +select stddev(s1) from root.test.d1; # ZScore -select zscore(s1) from root.test +select zscore(s1) from root.test; ``` ### 11.3 Anomaly Detection @@ -1203,33 +1207,33 @@ For details and examples, see the document [Anomaly-Detection](../SQL-Manual/UDF ```sql # IQR -select 
iqr(s1) from root.test +select iqr(s1) from root.test; # KSigma -select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30; # LOF -select lof(s1,s2) from root.test.d1 where time<1000 -select lof(s1, "method"="series") from root.test.d1 where time<1000 +select lof(s1,s2) from root.test.d1 where time<1000; +select lof(s1, "method"="series") from root.test.d1 where time<1000; # MissDetect -select missdetect(s2,'minlen'='10') from root.test.d2 +select missdetect(s2,'minlen'='10') from root.test.d2; # Range -select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30; # TwoSidedFilter -select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test +select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test; # Outlier -select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test +select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test; # MasterTrain -select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test +select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test; # MasterDetect -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test; +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test; ``` ### 11.4 Frequency Domain @@ -1238,30 +1242,30 @@ For details and examples, see the document [Frequency-Domain](../SQL-Manual/UDF- ```sql # Conv -select conv(s1,s2) from root.test.d2 +select conv(s1,s2) from root.test.d2; # 
Deconv -select deconv(s3,s2) from root.test.d2 -select deconv(s3,s2,'result'='remainder') from root.test.d2 +select deconv(s3,s2) from root.test.d2; +select deconv(s3,s2,'result'='remainder') from root.test.d2; # DWT -select dwt(s1,"method"="haar") from root.test.d1 +select dwt(s1,"method"="haar") from root.test.d1; # FFT -select fft(s1) from root.test.d1 -select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1 +select fft(s1) from root.test.d1; +select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1; # HighPass -select highpass(s1,'wpass'='0.45') from root.test.d1 +select highpass(s1,'wpass'='0.45') from root.test.d1; # IFFT -select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1 +select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1; # LowPass -select lowpass(s1,'wpass'='0.45') from root.test.d1 +select lowpass(s1,'wpass'='0.45') from root.test.d1; # Envelope -select envelope(s1) from root.test.d1 +select envelope(s1) from root.test.d1; ``` ### 11.5 Data Matching @@ -1270,19 +1274,19 @@ For details and examples, see the document [Data-Matching](../SQL-Manual/UDF-Lib ```sql # Cov -select cov(s1,s2) from root.test.d2 +select cov(s1,s2) from root.test.d2; # DTW -select dtw(s1,s2) from root.test.d2 +select dtw(s1,s2) from root.test.d2; # Pearson -select pearson(s1,s2) from root.test.d2 +select pearson(s1,s2) from root.test.d2; # PtnSym -select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1 +select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1; # XCorr -select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 +select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05; ``` ### 11.6 Data Repairing @@ -1291,23 +1295,23 @@ For details and examples, see the document [Data-Repairing](../SQL-Manual/UDF-Li ```sql # TimestampRepair -select 
timestamprepair(s1,'interval'='10000') from root.test.d2 -select timestamprepair(s1) from root.test.d2 +select timestamprepair(s1,'interval'='10000') from root.test.d2; +select timestamprepair(s1) from root.test.d2; # ValueFill -select valuefill(s1) from root.test.d2 -select valuefill(s1,"method"="previous") from root.test.d2 +select valuefill(s1) from root.test.d2; +select valuefill(s1,"method"="previous") from root.test.d2; # ValueRepair -select valuerepair(s1) from root.test.d2 -select valuerepair(s1,'method'='LsGreedy') from root.test.d2 +select valuerepair(s1) from root.test.d2; +select valuerepair(s1,'method'='LsGreedy') from root.test.d2; # MasterRepair -select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test +select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test; # SeasonalRepair -select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2 -select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 +select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2; +select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2; ``` ### 11.7 Series Discovery @@ -1316,11 +1320,11 @@ For details and examples, see the document [Series-Discovery](../SQL-Manual/UDF- ```sql # ConsecutiveSequences -select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1 -select consecutivesequences(s1,s2) from root.test.d1 +select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1; +select consecutivesequences(s1,s2) from root.test.d1; # ConsecutiveWindows -select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 +select consecutivewindows(s1,s2,'length'='10m') from root.test.d1; ``` ### 11.8 Machine Learning @@ -1329,13 +1333,13 @@ For details and examples, see the document [Machine-Learning](../SQL-Manual/UDF- ```sql # AR -select ar(s0,"p"="2") from root.test.d0 +select ar(s0,"p"="2") from root.test.d0; # Representation -select representation(s0,"tb"="3","vb"="2") from root.test.d0 +select representation(s0,"tb"="3","vb"="2") from 
root.test.d0; # RM -select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0 +select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0; ``` ## 12. CONDITIONAL EXPRESSION @@ -1348,24 +1352,24 @@ when 1000=1050 then "bad temperature" when P<=1000000 or P>=1100000 then "bad pressure" end as `result` -from root.test1 +from root.test1; select str, case when str like "%cc%" then "has cc" when str like "%dd%" then "has dd" else "no cc and dd" end as `result` -from root.test2 +from root.test2; select count(case when x<=1 then 1 end) as `(-∞,1]`, count(case when 1 +DROP (CONTINUOUS QUERY | CQ) ; ``` #### Altering continuous queries @@ -1557,9 +1561,9 @@ DROP FUNCTION ### 15.3 UDF Queries ```sql -SELECT example(*) from root.sg.d1 -SELECT example(s1, *) from root.sg.d1 -SELECT example(*, *) from root.sg.d1 +SELECT example(*) from root.sg.d1; +SELECT example(s1, *) from root.sg.d1; +SELECT example(*, *) from root.sg.d1; SELECT example(s1, 'key1'='value1', 'key2'='value2'), example(*, 'key3'='value3') FROM root.sg.d1; SELECT example(s1, s2, 'key1'='value1', 'key2'='value2') FROM root.sg.d1; @@ -1585,43 +1589,43 @@ For more details, see document [Authority Management](../User-Manual/Authority-M - Create user (Requires MANAGE_USER permission) ```SQL -CREATE USER -eg: CREATE USER user1 'passwd' +CREATE USER ; +eg: CREATE USER user1 'passwd'; ``` - Delete user (Requires MANAGE_USER permission) ```sql -DROP USER -eg: DROP USER user1 +DROP USER ; +eg: DROP USER user1; ``` - Create role (Requires MANAGE_ROLE permission) ```sql -CREATE ROLE -eg: CREATE ROLE role1 +CREATE ROLE ; +eg: CREATE ROLE role1; ``` - Delete role (Requires MANAGE_ROLE permission) ```sql -DROP ROLE -eg: DROP ROLE role1 +DROP ROLE ; +eg: DROP ROLE role1; ``` - Grant role to user (Requires MANAGE_ROLE permission) ```sql -GRANT ROLE TO -eg: GRANT ROLE admin TO user1 +GRANT ROLE TO ; +eg: GRANT ROLE admin TO user1; ``` - Revoke role from user(Requires MANAGE_ROLE permission) ```sql -REVOKE ROLE FROM -eg: REVOKE ROLE admin 
FROM user1 +REVOKE ROLE FROM ; +eg: REVOKE ROLE admin FROM user1; ``` - List all user (Requires MANAGE_USER permission) @@ -1639,15 +1643,15 @@ LIST ROLE - List all users granted specific role.(Requires MANAGE_USER permission) ```sql -LIST USER OF ROLE -eg: LIST USER OF ROLE roleuser +LIST USER OF ROLE ; +eg: LIST USER OF ROLE roleuser; ``` - List all role granted to specific user. ```sql -LIST ROLE OF USER -eg: LIST ROLE OF USER tempuser +LIST ROLE OF USER ; +eg: LIST ROLE OF USER tempuser; ``` - List all privileges of user @@ -1696,13 +1700,13 @@ eg: REVOKE ALL ON root.** FROM USER user1; #### Delete Time Partition (experimental) ```sql -Eg: IoTDB > DELETE PARTITION root.ln 0,1,2 +Eg: DELETE PARTITION root.ln 0,1,2; ``` #### Continuous Query,CQ ```sql -Eg: IoTDB > CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO temperature_max FROM root.ln.*.* GROUP BY time(10s) END +Eg: CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO temperature_max FROM root.ln.*.* GROUP BY time(10s) END; ``` #### Maintenance Command @@ -1710,42 +1714,42 @@ Eg: IoTDB > CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO - FLUSH ```sql -Eg: IoTDB > flush +Eg: flush ``` - MERGE ```sql -Eg: IoTDB > MERGE -Eg: IoTDB > FULL MERGE +Eg: MERGE; +Eg: FULL MERGE; ``` - CLEAR CACHE ```sql -Eg: IoTDB > CLEAR CACHE +Eg: CLEAR CACHE ``` - START REPAIR DATA ```sql -Eg: IoTDB > START REPAIR DATA +Eg: START REPAIR DATA ``` - STOP REPAIR DATA ```sql -Eg: IoTDB > STOP REPAIR DATA +Eg: STOP REPAIR DATA ``` - SET SYSTEM TO READONLY / WRITABLE ```sql -Eg: IoTDB > SET SYSTEM TO READONLY / WRITABLE +Eg: SET SYSTEM TO READONLY / WRITABLE ``` - Query abort ```sql -Eg: IoTDB > KILL QUERY 1 +Eg: KILL QUERY 1 ``` \ No newline at end of file diff --git a/src/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md b/src/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md index 9683a27ad..9316c895d 100644 --- 
a/src/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md +++ b/src/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md @@ -28,33 +28,33 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad ### 1.1 Create Database ```sql -IoTDB > create database root.ln -IoTDB > create database root.sgcc +create database root.ln; +create database root.sgcc; ``` ### 1.2 Show Databases ```sql -IoTDB> SHOW DATABASES -IoTDB> SHOW DATABASES root.** +SHOW DATABASES; +SHOW DATABASES root.**; ``` ### 1.3 Delete Database ```sql -IoTDB > DELETE DATABASE root.ln -IoTDB > DELETE DATABASE root.sgcc -// delete all data, all timeseries and all databases -IoTDB > DELETE DATABASE root.** +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +// delete all data, all timeseries and all databases; +DELETE DATABASE root.**; ``` ### 1.4 Count Databases ```sql -IoTDB> count databases -IoTDB> count databases root.* -IoTDB> count databases root.sgcc.* -IoTDB> count databases root.sgcc +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` ### 1.5 Setting up heterogeneous databases (Advanced operations) @@ -74,7 +74,7 @@ ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; #### Show heterogeneous databases ```sql -SHOW DATABASES DETAILS +SHOW DATABASES DETAILS; ``` ### 1.6 TTL @@ -82,25 +82,25 @@ SHOW DATABASES DETAILS #### Set TTL ```sql -IoTDB> set ttl to root.ln 3600000 -IoTDB> set ttl to root.sgcc.** 3600000 -IoTDB> set ttl to root.** 3600000 +set ttl to root.ln 3600000; +set ttl to root.sgcc.** 3600000; +set ttl to root.** 3600000; ``` #### Unset TTL ```sql -IoTDB> unset ttl from root.ln -IoTDB> unset ttl from root.sgcc.** -IoTDB> unset ttl from root.** +unset ttl from root.ln; +unset ttl from root.sgcc.**; +unset ttl from root.**; ``` #### Show TTL ```sql -IoTDB> SHOW ALL TTL -IoTDB> SHOW TTL ON StorageGroupNames -IoTDB> SHOW DEVICES +SHOW ALL TTL; +SHOW TTL ON StorageGroupNames; +SHOW 
DEVICES; ``` ## 2. TIMESERIES MANAGEMENT @@ -110,108 +110,108 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad ### 2.1 Create Timeseries ```sql -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` - From v0.13, you can use a simplified version of the SQL statements to create timeseries: ```sql -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with 
datatype=FLOAT; ``` - Notice that when in the CREATE TIMESERIES statement the encoding method conflicts with the data type, the system gives the corresponding error prompt as shown below: ```sql -IoTDB > create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN; error: encoding TS_2DIFF does not support BOOLEAN ``` ### 2.2 Create Aligned Timeseries ```sql -IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT , longitude FLOAT) +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT , longitude FLOAT); ``` ### 2.3 Delete Timeseries ```sql -IoTDB> delete timeseries root.ln.wf01.wt01.status -IoTDB> delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -IoTDB> delete timeseries root.ln.wf02.* -IoTDB> drop timeseries root.ln.wf02.* +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` ### 2.4 Show Timeseries ```sql -IoTDB> show timeseries root.** -IoTDB> show timeseries root.ln.** -IoTDB> show timeseries root.ln.** limit 10 offset 10 -IoTDB> show timeseries root.ln.** where timeseries contains 'wf01.wt' -IoTDB> show timeseries root.ln.** where dataType=FLOAT +show timeseries root.**; +show timeseries root.ln.**; +show timeseries root.ln.** limit 10 offset 10; +show timeseries root.ln.** where timeseries contains 'wf01.wt'; +show timeseries root.ln.** where dataType=FLOAT; ``` ### 2.5 Count Timeseries ```sql -IoTDB > COUNT TIMESERIES root.** -IoTDB > COUNT TIMESERIES root.ln.** -IoTDB > COUNT TIMESERIES root.ln.*.*.status -IoTDB > COUNT TIMESERIES root.ln.wf01.wt01.status -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -IoTDB > COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -IoTDB > 
COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 -IoTDB > COUNT TIMESERIES root.** GROUP BY LEVEL=1 -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc'; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c'; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c'; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` ### 2.6 Tag and Attribute Management ```sql -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` * Rename the tag/attribute key ```SQL -ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1; ``` * Reset the tag/attribute value ```SQL -ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1; ``` * Delete the existing tag/attribute ```SQL -ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2; ``` * Add new tags ```SQL -ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4; ``` * Add new attributes ```SQL -ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4; ``` * Upsert alias, tags and attributes @@ -219,49 +219,51 
@@ ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 > add alias or a new key-value if the alias or key doesn't exist, otherwise, update the old one with new value. ```SQL -ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4) +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4); ``` * Show timeseries using tags. Use TAGS(tagKey) to identify the tags used as filter key ```SQL -SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause +SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; ``` returns all the timeseries information that satisfy the where condition and match the pathPattern. SQL statements are as follows: ```SQL -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` - count timeseries using tags ```SQL -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL=; ``` returns all the number of timeseries that satisfy the where condition and match the pathPattern. 
SQL statements are as follows: ```SQL -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` create aligned timeseries ```SQL -create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) +create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)); ``` The execution result is as follows: ```SQL -IoTDB> show timeseries +show timeseries; +``` +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -273,7 +275,9 @@ IoTDB> show timeseries Support query: ```SQL -IoTDB> show timeseries where TAGS(tag1)='v1' +show timeseries where TAGS(tag1)='v1'; +``` +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -290,40 +294,40 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad ### 3.1 Show Child Paths ```SQL -SHOW CHILD PATHS pathPattern +SHOW CHILD PATHS pathPattern; ``` ### 3.2 Show Child Nodes ```SQL -SHOW 
CHILD NODES pathPattern +SHOW CHILD NODES pathPattern; ``` ### 3.3 Count Nodes ```SQL -IoTDB > COUNT NODES root.** LEVEL=2 -IoTDB > COUNT NODES root.ln.** LEVEL=2 -IoTDB > COUNT NODES root.ln.wf01.** LEVEL=3 -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.** LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` ### 3.4 Show Devices ```SQL -IoTDB> show devices -IoTDB> show devices root.ln.** -IoTDB> show devices root.ln.** where device contains 't' -IoTDB> show devices with database -IoTDB> show devices root.ln.** with database +show devices; +show devices root.ln.**; +show devices root.ln.** where device contains 't'; +show devices with database; +show devices root.ln.** with database; ``` ### 3.5 Count Devices ```SQL -IoTDB> show devices -IoTDB> count devices -IoTDB> count devices root.ln.** +show devices; +count devices; +count devices root.ln.**; ``` ## 4. INSERT & LOAD DATA @@ -337,30 +341,30 @@ For more details, see document [Write-Data](../Basic-Concept/Write-Data_timecho) - Insert Single Timeseries ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp,status) values(1,true) -IoTDB > insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1') +insert into root.ln.wf02.wt02(timestamp,status) values(1,true); +insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1'); ``` - Insert Multiple Timeseries ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (2, false, 'v2') -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4') +insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (2, false, 'v2'); +insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4'); ``` - Use the Current System Timestamp as the Timestamp of the Data Point ```SQL -IoTDB > insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2') +insert into 
root.ln.wf02.wt02(status, hardware) values (false, 'v2'); ``` #### Insert Data Into Aligned Timeseries ```SQL -IoTDB > create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE) -IoTDB > insert into root.sg1.d1(time, s1, s2) aligned values(1, 1, 1) -IoTDB > insert into root.sg1.d1(time, s1, s2) aligned values(2, 2, 2), (3, 3, 3) -IoTDB > select * from root.sg1.d1 +create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE); +insert into root.sg1.d1(time, s1, s2) aligned values(1, 1, 1); +insert into root.sg1.d1(time, s1, s2) aligned values(2, 2, 2), (3, 3, 3); +select * from root.sg1.d1; ``` ### 4.2 Load External TsFile Tool @@ -397,34 +401,34 @@ For more details, see document [Write-Delete-Data](../Basic-Concept/Write-Data_t ### 5.1 Delete Single Timeseries ```sql -IoTDB > delete from root.ln.wf02.wt02.status where time<=2017-11-01T16:26:00; -IoTDB > delete from root.ln.wf02.wt02.status where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -IoTDB > delete from root.ln.wf02.wt02.status where time < 10 -IoTDB > delete from root.ln.wf02.wt02.status where time <= 10 -IoTDB > delete from root.ln.wf02.wt02.status where time < 20 and time > 10 -IoTDB > delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10 -IoTDB > delete from root.ln.wf02.wt02.status where time > 20 -IoTDB > delete from root.ln.wf02.wt02.status where time >= 20 -IoTDB > delete from root.ln.wf02.wt02.status where time = 20 -IoTDB > delete from root.ln.wf02.wt02.status where time > 4 or time < 0 -Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic -expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' -IoTDB > delete from root.ln.wf02.wt02.status +delete from root.ln.wf02.wt02.status where time<=2017-11-01T16:26:00; +delete from root.ln.wf02.wt02.status where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; +delete from root.ln.wf02.wt02.status where time < 10; +delete from root.ln.wf02.wt02.status 
where time <= 10; +delete from root.ln.wf02.wt02.status where time < 20 and time > 10; +delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10; +delete from root.ln.wf02.wt02.status where time > 20; +delete from root.ln.wf02.wt02.status where time >= 20; +delete from root.ln.wf02.wt02.status where time = 20; +delete from root.ln.wf02.wt02.status where time > 4 or time < 0; +Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic +expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' +delete from root.ln.wf02.wt02.status; ``` ### 5.2 Delete Multiple Timeseries ```sql -IoTDB > delete from root.ln.wf02.wt02 where time <= 2017-11-01T16:26:00; -IoTDB > delete from root.ln.wf02.wt02.* where time <= 2017-11-01T16:26:00; -IoTDB> delete from root.ln.wf03.wt02.status where time < now() +delete from root.ln.wf02.wt02 where time <= 2017-11-01T16:26:00; +delete from root.ln.wf02.wt02.* where time <= 2017-11-01T16:26:00; +delete from root.ln.wf03.wt02.status where time < now(); Msg: The statement is executed successfully. ``` ### 5.3 Delete Time Partition (experimental) ```sql -IoTDB > DELETE PARTITION root.ln 0,1,2 +DELETE PARTITION root.ln 0,1,2; ``` ## 6. QUERY DATA @@ -458,31 +462,31 @@ SELECT [LAST] selectExpr [, selectExpr] ... 
#### Select a Column of Data Based on a Time Interval ```sql -IoTDB > select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 +select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000; ``` #### Select Multiple Columns of Data Based on a Time Interval ```sql -IoTDB > select status, temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; +select status, temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; ``` #### Select Multiple Columns of Data for the Same Device According to Multiple Time Intervals ```sql -IoTDB > select status,temperature from root.ln.wf01.wt01 where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); +select status,temperature from root.ln.wf01.wt01 where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); ``` #### Choose Multiple Columns of Data for Different Devices According to Multiple Time Intervals ```sql -IoTDB > select wf01.wt01.status,wf02.wt02.hardware from root.ln where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); +select wf01.wt01.status,wf02.wt02.hardware from root.ln where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); ``` #### Order By Time Query ```sql -IoTDB > select * from root.ln.** where time > 1 order by time desc limit 10; +select * from root.ln.** where time > 1 order by time desc limit 10; ``` ### 6.2 `SELECT` CLAUSE @@ -490,7 +494,7 @@ IoTDB > select * from root.ln.** where time > 1 order by time desc limit 10; #### Use Alias ```sql -IoTDB > select s1 as temperature, s2 as speed from root.ln.wf01.wt01; 
+select s1 as temperature, s2 as speed from root.ln.wf01.wt01; ``` #### Nested Expressions @@ -498,35 +502,35 @@ IoTDB > select s1 as temperature, s2 as speed from root.ln.wf01.wt01; ##### Nested Expressions with Time Series Query ```sql -IoTDB > select a, +select a, b, ((a + 1) * 2 - 1) % 2 + 1.5, sin(a + sin(a + sin(b))), -(a + b) * (sin(a + b) * sin(a + b) + cos(a + b) * cos(a + b)) + 1 from root.sg1; -IoTDB > select (a + b) * 2 + sin(a) from root.sg +select (a + b) * 2 + sin(a) from root.sg; -IoTDB > select (a + *) / 2 from root.sg1 +select (a + *) / 2 from root.sg1; -IoTDB > select (a + b) * 3 from root.sg, root.ln +select (a + b) * 3 from root.sg, root.ln; ``` ##### Nested Expressions query with aggregations ```sql -IoTDB > select avg(temperature), +select avg(temperature), sin(avg(temperature)), avg(temperature) + 1, -sum(hardware), avg(temperature) + sum(hardware) from root.ln.wf01.wt01; -IoTDB > select avg(*), +select avg(*), (avg(*) + 1) * 3 / 2 -1 -from root.sg1 +from root.sg1; -IoTDB > select avg(temperature), +select avg(temperature), sin(avg(temperature)), avg(temperature) + 1, -sum(hardware), @@ -538,10 +542,10 @@ GROUP BY([10, 90), 10ms); #### Last Query ```sql -IoTDB > select last status from root.ln.wf01.wt01 -IoTDB > select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 -IoTDB > select last * from root.ln.wf01.wt01 order by timeseries desc; -IoTDB > select last * from root.ln.wf01.wt01 order by dataType desc; +select last status from root.ln.wf01.wt01; +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; +select last * from root.ln.wf01.wt01 order by timeseries desc; +select last * from root.ln.wf01.wt01 order by dataType desc; ``` ### 6.3 `WHERE` CLAUSE @@ -549,22 +553,22 @@ IoTDB > select last * from root.ln.wf01.wt01 order by dataType desc; #### Time Filter ```sql -IoTDB > select s1 from root.sg1.d1 where time > 2022-01-01T00:05:00.000; -IoTDB > select s1 from root.sg1.d1 
where time = 2022-01-01T00:05:00.000; -IoTDB > select s1 from root.sg1.d1 where time >= 2022-01-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; +select s1 from root.sg1.d1 where time > 2022-01-01T00:05:00.000; +select s1 from root.sg1.d1 where time = 2022-01-01T00:05:00.000; +select s1 from root.sg1.d1 where time >= 2022-01-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; ``` #### Value Filter ```sql -IoTDB > select temperature from root.sg1.d1 where temperature > 36.5; -IoTDB > select status from root.sg1.d1 where status = true; -IoTDB > select temperature from root.sg1.d1 where temperature between 36.5 and 40; -IoTDB > select temperature from root.sg1.d1 where temperature not between 36.5 and 40; -IoTDB > select code from root.sg1.d1 where code in ('200', '300', '400', '500'); -IoTDB > select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); -IoTDB > select code from root.sg1.d1 where temperature is null; -IoTDB > select code from root.sg1.d1 where temperature is not null; +select temperature from root.sg1.d1 where temperature > 36.5; +select status from root.sg1.d1 where status = true; +select temperature from root.sg1.d1 where temperature between 36.5 and 40; +select temperature from root.sg1.d1 where temperature not between 36.5 and 40; +select code from root.sg1.d1 where code in ('200', '300', '400', '500'); +select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); +select code from root.sg1.d1 where temperature is null; +select code from root.sg1.d1 where temperature is not null; ``` #### Fuzzy Query @@ -572,15 +576,15 @@ IoTDB > select code from root.sg1.d1 where temperature is not null; - Fuzzy matching using `Like` ```sql -IoTDB > select * from root.sg.d1 where value like '%cc%' -IoTDB > select * from root.sg.device where value like '_b_' +select * from root.sg.d1 where value like '%cc%'; +select * from root.sg.device where value like '_b_'; ``` - Fuzzy matching using `Regexp` ```sql -IoTDB > select * from 
root.sg.d1 where value regexp '^[A-Za-z]+$' -IoTDB > select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; ``` ### 6.4 `GROUP BY` CLAUSE @@ -588,91 +592,91 @@ IoTDB > select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 - Aggregate By Time without Specifying the Sliding Step Length ```sql -IoTDB > select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d); +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d); ``` - Aggregate By Time Specifying the Sliding Step Length ```sql -IoTDB > select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d); +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d); ``` - Aggregate by Natural Month ```sql -IoTDB > select count(status) from root.ln.wf01.wt01 group by([2017-11-01T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); -IoTDB > select count(status) from root.ln.wf01.wt01 group by([2017-10-31T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); +select count(status) from root.ln.wf01.wt01 group by([2017-11-01T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); +select count(status) from root.ln.wf01.wt01 group by([2017-10-31T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); ``` - Left Open And Right Close Range ```sql -IoTDB > select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d); +select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d); ``` - Aggregation By Variation ```sql -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6) -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from 
root.sg.d group by variation(s6, ignoreNull=false) -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4) -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6); +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false); +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4); +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10); ``` - Aggregation By Condition ```sql -IoTDB > select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=true) -IoTDB > select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=false) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=true); +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=false); ``` - Aggregation By Session ```sql -IoTDB > select __endTime,count(*) from root.** group by session(1d) -IoTDB > select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device +select __endTime,count(*) from root.** group by session(1d); +select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device; ``` - Aggregation By Count ```sql -IoTDB > select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5) -IoTDB > select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false) +select count(charging_stauts), first_value(soc) from 
root.sg group by count(charging_status,5); +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false); ``` - Aggregation By Level ```sql -IoTDB > select count(status) from root.** group by level = 1 -IoTDB > select count(status) from root.** group by level = 3 -IoTDB > select count(status) from root.** group by level = 1, 3 -IoTDB > select max_value(temperature) from root.** group by level = 0 -IoTDB > select count(*) from root.ln.** group by level = 2 +select count(status) from root.** group by level = 1; +select count(status) from root.** group by level = 3; +select count(status) from root.** group by level = 1, 3; +select max_value(temperature) from root.** group by level = 0; +select count(*) from root.ln.** group by level = 2; ``` - Aggregate By Time with Level Clause ```sql -IoTDB > select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d), level=1; -IoTDB > select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d), level=1; +select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d), level=1; +select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d), level=1; ``` - Aggregation query by one single tag ```sql -IoTDB > SELECT AVG(temperature) FROM root.factory1.** GROUP BY TAGS(city); +SELECT AVG(temperature) FROM root.factory1.** GROUP BY TAGS(city); ``` - Aggregation query by multiple tags ```sql -IoTDB > SELECT avg(temperature) FROM root.factory1.** GROUP BY TAGS(city, workshop); +SELECT avg(temperature) FROM root.factory1.** GROUP BY TAGS(city, workshop); ``` - Downsampling Aggregation by tags based on Time Window ```sql -IoTDB > SELECT avg(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS(city, workshop); +SELECT avg(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS(city, workshop); 
``` ### 6.5 `HAVING` CLAUSE @@ -680,17 +684,17 @@ IoTDB > SELECT avg(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5 Correct: ```sql -IoTDB > select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 1 -IoTDB > select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device +select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 1; +select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device; ``` Incorrect: ```sql -IoTDB > select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 -IoTDB > select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 -IoTDB > select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 -IoTDB > select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 +select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; +select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; +select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; +select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; ``` ### 6.6 `FILL` CLAUSE @@ -698,7 +702,7 @@ IoTDB > select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having su #### `PREVIOUS` Fill ```sql -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(previous); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(previous); ``` #### `PREVIOUS` FILL and specify the fill timeout threshold @@ -709,14 +713,14 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: #### `LINEAR` Fill ```sql -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(linear); +select 
temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(linear); ``` #### Constant Fill ```sql -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(2.0); -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(true); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(2.0); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(true); ``` ### 6.7 `LIMIT` and `SLIMIT` CLAUSES (PAGINATION) @@ -724,24 +728,24 @@ IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-1 #### Row Control over Query Results ```sql -IoTDB > select status, temperature from root.ln.wf01.wt01 limit 10 -IoTDB > select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 -IoTDB > select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 2 offset 3 -IoTDB > select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 5 offset 3 +select status, temperature from root.ln.wf01.wt01 limit 10; +select status, temperature from root.ln.wf01.wt01 limit 5 offset 3; +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 2 offset 3; +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 5 offset 3; ``` #### Column Control over Query Results ```sql -IoTDB > select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 -IoTDB > select * from root.ln.wf01.wt01 
where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1 -IoTDB > select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1; +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1; +select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1; ``` #### Row and Column Control over Query Results ```sql -IoTDB > select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 +select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0; ``` ### 6.8 `ORDER BY` CLAUSE @@ -749,31 +753,31 @@ IoTDB > select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 #### Order by in ALIGN BY TIME mode ```sql -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time desc; +select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time desc; ``` #### Order by in ALIGN BY DEVICE mode ```sql -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc,time asc align by device; -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,device desc align by device; -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; -IoTDB > select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc,time asc align by device; +select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,device desc align by device; +select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; +select count(*) from root.ln.** group by 
((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` #### Order by arbitrary expressions ```sql -IoTDB > select score from root.** order by score desc align by device -IoTDB > select score,total from root.one order by base+score+bonus desc -IoTDB > select score,total from root.one order by total desc -IoTDB > select base, score, bonus, total from root.** order by total desc NULLS Last, +select score from root.** order by score desc align by device; +select score,total from root.one order by base+score+bonus desc; +select score,total from root.one order by total desc; +select base, score, bonus, total from root.** order by total desc NULLS Last, score desc NULLS Last, bonus desc NULLS Last, - time desc align by device -IoTDB > select min_value(total) from root.** order by min_value(total) asc align by device -IoTDB > select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device -IoTDB > select score from root.** order by device asc, score desc, time asc align by device + time desc align by device; +select min_value(total) from root.** order by min_value(total) asc align by device; +select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device; +select score from root.** order by device asc, score desc, time asc align by device; ``` ### 6.9 `ALIGN BY` CLAUSE @@ -781,54 +785,54 @@ IoTDB > select score from root.** order by device asc, score desc, time asc alig #### Align by Device ```sql -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; +select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` ### 6.10 `INTO` CLAUSE (QUERY WRITE-BACK) ```sql -IoTDB > select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; -IoTDB > select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from 
root.sg.d1 group by ([0, 100), 10ms); -IoTDB > select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; -IoTDB > select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; ``` - Using variable placeholders: ```sql -IoTDB > select s1, s2 +select s1, s2 into root.sg_copy.d1(::), root.sg_copy.d2(s1), root.sg_copy.d1(${3}), root.sg_copy.d2(::) from root.sg.d1, root.sg.d2; -IoTDB > select d1.s1, d1.s2, d2.s3, d3.s4 +select d1.s1, d1.s2, d2.s3, d3.s4 into ::(s1_1, s2_2), root.sg.d2_2(s3_3), root.${2}_copy.::(s4) from root.sg; -IoTDB > select * into root.sg_bk.::(::) from root.sg.**; +select * into root.sg_bk.::(::) from root.sg.**; -IoTDB > select s1, s2, s3, s4 +select s1, s2, s3, s4 into root.backup_sg.d1(s1, s2, s3, s4), root.backup_sg.d2(::), root.sg.d3(backup_${4}) from root.sg.d1, root.sg.d2, root.sg.d3 align by device; -IoTDB > select avg(s1), sum(s2) + sum(s3), count(s4) +select avg(s1), sum(s2) + sum(s3), count(s4) into root.agg_${2}.::(avg_s1, sum_s2_add_s3, count_s4) from root.** align by device; -IoTDB > select * into ::(backup_${4}) from root.sg.** align by device; +select * into ::(backup_${4}) from root.sg.** align by device; -IoTDB > select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by 
device; ``` ## 7. Maintenance Generate the corresponding query plan: ```sql -explain select s1,s2 from root.sg.d1 +explain select s1,s2 from root.sg.d1; ``` Execute the corresponding SQL, analyze the execution and output: ```sql -explain analyze select s1,s2 from root.sg.d1 order by s1 +explain analyze select s1,s2 from root.sg.d1 order by s1; ``` ## 8. OPERATOR @@ -839,7 +843,7 @@ For more details, see document [Operator-and-Expression](./Operator-and-Expressi For details and examples, see the document [Arithmetic Operators and Functions](./Operator-and-Expression.md#_1-1-arithmetic-operators). ```sql -select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1 +select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1; ``` ### 8.2 Comparison Operators @@ -847,27 +851,27 @@ select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root For details and examples, see the document [Comparison Operators and Functions](./Operator-and-Expression.md#_1-2-comparison-operators). ```sql -# Basic comparison operators +# Basic comparison operators; select a, b, a > 10, a <= b, !(a <= b), a > 10 && a > b from root.test; -# `BETWEEN ... 
AND ...` operator; select temperature from root.sg1.d1 where temperature between 36.5 and 40; select temperature from root.sg1.d1 where temperature not between 36.5 and 40; -# Fuzzy matching operator: Use `Like` for fuzzy matching -select * from root.sg.d1 where value like '%cc%' -select * from root.sg.device where value like '_b_' +# Fuzzy matching operator: Use `Like` for fuzzy matching; +select * from root.sg.d1 where value like '%cc%'; +select * from root.sg.device where value like '_b_'; -# Fuzzy matching operator: Use `Regexp` for fuzzy matching -select * from root.sg.d1 where value regexp '^[A-Za-z]+$' -select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +# Fuzzy matching operator: Use `Regexp` for fuzzy matching; +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; select b, b like '1%', b regexp '[0-2]' from root.test; -# `IS NULL` operator +# `IS NULL` operator; select code from root.sg1.d1 where temperature is null; select code from root.sg1.d1 where temperature is not null; -# `IN` operator +# `IN` operator; select code from root.sg1.d1 where code in ('200', '300', '400', '500'); select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); select a, a in (1, 2) from root.test; @@ -923,25 +927,25 @@ For details and examples, see the document [String Processing](./Operator-and-Ex ```sql select s1, string_contains(s1, 's'='warn') from root.sg1.d4; select s1, string_matches(s1, 'regex'='[^\\s]+37229') from root.sg1.d4; -select s1, length(s1) from root.sg1.d1 -select s1, locate(s1, "target"="1") from root.sg1.d1 -select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1 -select s1, startswith(s1, "target"="1") from root.sg1.d1 -select s1, endswith(s1, "target"="1") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", 
"series_behind"="true") from root.sg1.d1 -select s1, substring(s1 from 1 for 2) from root.sg1.d1 -select s1, replace(s1, 'es', 'tt') from root.sg1.d1 -select s1, upper(s1) from root.sg1.d1 -select s1, lower(s1) from root.sg1.d1 -select s3, trim(s3) from root.sg1.d1 -select s1, s2, strcmp(s1, s2) from root.sg1.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1 -select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1 -select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 +select s1, length(s1) from root.sg1.d1; +select s1, locate(s1, "target"="1") from root.sg1.d1; +select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1; +select s1, startswith(s1, "target"="1") from root.sg1.d1; +select s1, endswith(s1, "target"="1") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", "series_behind"="true") from root.sg1.d1; +select s1, substring(s1 from 1 for 2) from root.sg1.d1; +select s1, replace(s1, 'es', 'tt') from root.sg1.d1; +select s1, upper(s1) from root.sg1.d1; +select s1, lower(s1) from root.sg1.d1; +select s3, trim(s3) from root.sg1.d1; +select s1, s2, strcmp(s1, s2) from root.sg1.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1; +select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1; +select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1; +select 
regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1; +select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1; ``` ### 9.5 Data Type Conversion Function @@ -949,7 +953,7 @@ select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 For details and examples, see the document [Data Type Conversion Function](./Operator-and-Expression.md#_2-5-data-type-conversion-function). ```sql -SELECT cast(s1 as INT32) from root.sg +SELECT cast(s1 as INT32) from root.sg; ``` ### 9.6 Constant Timeseries Generating Functions @@ -997,8 +1001,8 @@ select equal_size_bucket_agg_sample(temperature, 'type'='avg','proportion'='0.1' select equal_size_bucket_m4_sample(temperature, 'proportion'='0.1') as M4_sample from root.ln.wf01.wt01; select equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='avg', 'number'='2') as outlier_avg_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='stendis', 'number'='2') as outlier_stendis_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='cos', 'number'='2') as outlier_cos_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='prenextdis', 'number'='2') as outlier_prenextdis_sample from root.ln.wf01.wt01; -select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1 -select M4(s1,'windowSize'='10') from root.vehicle.d1 +select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1; +select M4(s1,'windowSize'='10') from root.vehicle.d1; ``` ### 9.11 Change Points Function @@ -1006,7 +1010,7 @@ select M4(s1,'windowSize'='10') from root.vehicle.d1 For details and examples, see the document [Time-Series](./Operator-and-Expression.md#_2-11-change-points-function). 
```sql -select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1 +select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1; ``` ## 10. DATA QUALITY FUNCTION LIBRARY @@ -1018,24 +1022,24 @@ For more details, see document [Operator-and-Expression](../SQL-Manual/UDF-Libra For details and examples, see the document [Data-Quality](../SQL-Manual/UDF-Libraries.md#data-quality). ```sql -# Completeness -select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Completeness; +select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Consistency -select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Consistency; +select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Timeliness -select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Timeliness; +select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Validity -select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Validity; +select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 
00:01:00; -# Accuracy -select Accuracy(t1,t2,t3,m1,m2,m3) from root.test +# Accuracy; +select Accuracy(t1,t2,t3,m1,m2,m3) from root.test; ``` ### 10.2 Data Profiling @@ -1043,79 +1047,79 @@ select Accuracy(t1,t2,t3,m1,m2,m3) from root.test For details and examples, see the document [Data-Profiling](../SQL-Manual/UDF-Libraries.md#data-profiling). ```sql -# ACF -select acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05 +# ACF; +select acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05; -# Distinct -select distinct(s2) from root.test.d2 +# Distinct; +select distinct(s2) from root.test.d2; -# Histogram -select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1 +# Histogram; +select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1; -# Integral -select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 -select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10 +# Integral; +select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; +select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10; -# IntegralAvg -select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 +# IntegralAvg; +select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; -# Mad -select mad(s0) from root.test -select mad(s0, "error"="0.01") from root.test +# Mad; +select mad(s0) from root.test; +select mad(s0, "error"="0.01") from root.test; -# Median -select median(s0, "error"="0.01") from root.test +# Median; +select median(s0, "error"="0.01") from root.test; -# MinMax -select minmax(s1) from root.test +# MinMax; +select minmax(s1) from root.test; -# Mode -select mode(s2) from root.test.d2 +# Mode; +select mode(s2) from root.test.d2; -# MvAvg -select mvavg(s1, "window"="3") from root.test +# MvAvg; +select mvavg(s1, "window"="3") from root.test; -# PACF -select pacf(s1, "lag"="5") from root.test +# PACF; +select pacf(s1, "lag"="5") from 
root.test; -# Percentile -select percentile(s0, "rank"="0.2", "error"="0.01") from root.test +# Percentile; +select percentile(s0, "rank"="0.2", "error"="0.01") from root.test; -# Quantile -select quantile(s0, "rank"="0.2", "K"="800") from root.test +# Quantile; +select quantile(s0, "rank"="0.2", "K"="800") from root.test; -# Period -select period(s1) from root.test.d3 +# Period; +select period(s1) from root.test.d3; -# QLB -select QLB(s1) from root.test.d1 +# QLB; +select QLB(s1) from root.test.d1; -# Resample -select resample(s1,'every'='5m','interp'='linear') from root.test.d1 -select resample(s1,'every'='30m','aggr'='first') from root.test.d1 -select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1 +# Resample; +select resample(s1,'every'='5m','interp'='linear') from root.test.d1; +select resample(s1,'every'='30m','aggr'='first') from root.test.d1; +select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1; -# Sample -select sample(s1,'method'='reservoir','k'='5') from root.test.d1 -select sample(s1,'method'='isometric','k'='5') from root.test.d1 +# Sample; +select sample(s1,'method'='reservoir','k'='5') from root.test.d1; +select sample(s1,'method'='isometric','k'='5') from root.test.d1; -# Segment -select segment(s1, "error"="0.1") from root.test +# Segment; +select segment(s1, "error"="0.1") from root.test; -# Skew -select skew(s1) from root.test.d1 +# Skew; +select skew(s1) from root.test.d1; -# Spline -select spline(s1, "points"="151") from root.test +# Spline; +select spline(s1, "points"="151") from root.test; -# Spread -select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 +# Spread; +select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; -# Stddev -select stddev(s1) from root.test.d1 +# Stddev; +select stddev(s1) from root.test.d1; -# ZScore -select zscore(s1) from root.test +# ZScore; +select zscore(s1) from root.test; ``` ### 10.3 Anomaly Detection @@ -1123,34 +1127,34 
@@ select zscore(s1) from root.test For details and examples, see the document [Anomaly-Detection](../SQL-Manual/UDF-Libraries.md#anomaly-detection). ```sql -# IQR -select iqr(s1) from root.test +# IQR; +select iqr(s1) from root.test; -# KSigma -select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +# KSigma; +select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30; -# LOF -select lof(s1,s2) from root.test.d1 where time<1000 -select lof(s1, "method"="series") from root.test.d1 where time<1000 +# LOF; +select lof(s1,s2) from root.test.d1 where time<1000; +select lof(s1, "method"="series") from root.test.d1 where time<1000; -# MissDetect -select missdetect(s2,'minlen'='10') from root.test.d2 +# MissDetect; +select missdetect(s2,'minlen'='10') from root.test.d2; -# Range -select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +# Range; +select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30; -# TwoSidedFilter -select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test +# TwoSidedFilter; +select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test; -# Outlier -select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test +# Outlier; +select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test; -# MasterTrain -select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test +# MasterTrain; +select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test; -# MasterDetect -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test +# MasterDetect; +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test; +select 
MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test; ``` ### 10.4 Frequency Domain @@ -1158,31 +1162,31 @@ select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3 For details and examples, see the document [Frequency-Domain](../SQL-Manual/UDF-Libraries.md#frequency-domain-analysis). ```sql -# Conv -select conv(s1,s2) from root.test.d2 +# Conv; +select conv(s1,s2) from root.test.d2; -# Deconv -select deconv(s3,s2) from root.test.d2 -select deconv(s3,s2,'result'='remainder') from root.test.d2 +# Deconv; +select deconv(s3,s2) from root.test.d2; +select deconv(s3,s2,'result'='remainder') from root.test.d2; -# DWT -select dwt(s1,"method"="haar") from root.test.d1 +# DWT; +select dwt(s1,"method"="haar") from root.test.d1; -# FFT -select fft(s1) from root.test.d1 -select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1 +# FFT; +select fft(s1) from root.test.d1; +select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1; -# HighPass -select highpass(s1,'wpass'='0.45') from root.test.d1 +# HighPass; +select highpass(s1,'wpass'='0.45') from root.test.d1; -# IFFT -select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1 +# IFFT; +select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1; -# LowPass -select lowpass(s1,'wpass'='0.45') from root.test.d1 +# LowPass; +select lowpass(s1,'wpass'='0.45') from root.test.d1; -# Envelope -select envelope(s1) from root.test.d1 +# Envelope; +select envelope(s1) from root.test.d1; ``` ### 10.5 Data Matching @@ -1190,20 +1194,20 @@ select envelope(s1) from root.test.d1 For details and examples, see the document [Data-Matching](../SQL-Manual/UDF-Libraries.md#data-matching). 
```sql -# Cov -select cov(s1,s2) from root.test.d2 +# Cov; +select cov(s1,s2) from root.test.d2; -# DTW -select dtw(s1,s2) from root.test.d2 +# DTW; +select dtw(s1,s2) from root.test.d2; -# Pearson -select pearson(s1,s2) from root.test.d2 +# Pearson; +select pearson(s1,s2) from root.test.d2; -# PtnSym -select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1 +# PtnSym; +select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1; -# XCorr -select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 +# XCorr; +select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05; ``` ### 10.6 Data Repairing @@ -1211,24 +1215,24 @@ select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 For details and examples, see the document [Data-Repairing](../SQL-Manual/UDF-Libraries.md#data-repairing). ```sql -# TimestampRepair -select timestamprepair(s1,'interval'='10000') from root.test.d2 -select timestamprepair(s1) from root.test.d2 +# TimestampRepair; +select timestamprepair(s1,'interval'='10000') from root.test.d2; +select timestamprepair(s1) from root.test.d2; -# ValueFill -select valuefill(s1) from root.test.d2 -select valuefill(s1,"method"="previous") from root.test.d2 +# ValueFill; +select valuefill(s1) from root.test.d2; +select valuefill(s1,"method"="previous") from root.test.d2; -# ValueRepair -select valuerepair(s1) from root.test.d2 -select valuerepair(s1,'method'='LsGreedy') from root.test.d2 +# ValueRepair; +select valuerepair(s1) from root.test.d2; +select valuerepair(s1,'method'='LsGreedy') from root.test.d2; -# MasterRepair -select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test +# MasterRepair; +select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test; -# SeasonalRepair -select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2 -select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 +# SeasonalRepair; +select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2; +select 
seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2; ``` ### 10.7 Series Discovery @@ -1236,12 +1240,12 @@ select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 For details and examples, see the document [Series-Discovery](../SQL-Manual/UDF-Libraries.md#series-discovery). ```sql -# ConsecutiveSequences -select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1 -select consecutivesequences(s1,s2) from root.test.d1 +# ConsecutiveSequences; +select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1; +select consecutivesequences(s1,s2) from root.test.d1; -# ConsecutiveWindows -select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 +# ConsecutiveWindows; +select consecutivewindows(s1,s2,'length'='10m') from root.test.d1; ``` ### 10.8 Machine Learning @@ -1249,14 +1253,14 @@ select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 For details and examples, see the document [Machine-Learning](../SQL-Manual/UDF-Libraries.md#machine-learning). 
```sql -# AR -select ar(s0,"p"="2") from root.test.d0 +# AR; +select ar(s0,"p"="2") from root.test.d0; -# Representation -select representation(s0,"tb"="3","vb"="2") from root.test.d0 +# Representation; +select representation(s0,"tb"="3","vb"="2") from root.test.d0; -# RM -select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0 +# RM; +select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0; ``` @@ -1270,24 +1274,24 @@ when 1000=1050 then "bad temperature" when P<=1000000 or P>=1100000 then "bad pressure" end as `result` -from root.test1 +from root.test1; select str, case when str like "%cc%" then "has cc" when str like "%dd%" then "has dd" else "no cc and dd" end as `result` -from root.test2 +from root.test2; select count(case when x<=1 then 1 end) as `(-∞,1]`, count(case when 1 ### 14.3 UDF Queries ```sql -SELECT example(*) from root.sg.d1 -SELECT example(s1, *) from root.sg.d1 -SELECT example(*, *) from root.sg.d1 +SELECT example(*) from root.sg.d1; +SELECT example(s1, *) from root.sg.d1; +SELECT example(*, *) from root.sg.d1; SELECT example(s1, 'key1'='value1', 'key2'='value2'), example(*, 'key3'='value3') FROM root.sg.d1; SELECT example(s1, s2, 'key1'='value1', 'key2'='value2') FROM root.sg.d1; @@ -1495,7 +1499,7 @@ SELECT s1, s2, s1 + example(s1, s2), s1 - example(s1 + example(s1, s2) / s2) FRO ### 14.4 Show All Registered UDFs ```sql -SHOW FUNCTIONS +SHOW FUNCTIONS; ``` ## 15. 
ADMINISTRATION MANAGEMENT @@ -1507,69 +1511,69 @@ For more details, see document [Authority Management](../User-Manual/Authority-M - Create user (Requires MANAGE_USER permission) ```SQL -CREATE USER -eg: CREATE USER user1 'passwd' +CREATE USER ; +eg: CREATE USER user1 'passwd'; ``` - Delete user (Requires MANAGE_USER permission) ```sql -DROP USER -eg: DROP USER user1 +DROP USER ; +eg: DROP USER user1; ``` - Create role (Requires MANAGE_ROLE permission) ```sql -CREATE ROLE -eg: CREATE ROLE role1 +CREATE ROLE ; +eg: CREATE ROLE role1; ``` - Delete role (Requires MANAGE_ROLE permission) ```sql -DROP ROLE -eg: DROP ROLE role1 +DROP ROLE ; +eg: DROP ROLE role1; ``` - Grant role to user (Requires MANAGE_ROLE permission) ```sql -GRANT ROLE TO -eg: GRANT ROLE admin TO user1 +GRANT ROLE TO ; +eg: GRANT ROLE admin TO user1; ``` - Revoke role from user(Requires MANAGE_ROLE permission) ```sql -REVOKE ROLE FROM -eg: REVOKE ROLE admin FROM user1 +REVOKE ROLE FROM ; +eg: REVOKE ROLE admin FROM user1; ``` - List all user (Requires MANAGE_USER permission) ```sql -LIST USER +LIST USER; ``` - List all role (Requires MANAGE_ROLE permission) ```sql -LIST ROLE +LIST ROLE; ``` - List all users granted specific role.(Requires MANAGE_USER permission) ```sql -LIST USER OF ROLE -eg: LIST USER OF ROLE roleuser +LIST USER OF ROLE ; +eg: LIST USER OF ROLE roleuser; ``` - List all role granted to specific user. 
```sql -LIST ROLE OF USER -eg: LIST ROLE OF USER tempuser +LIST ROLE OF USER ; +eg: LIST ROLE OF USER tempuser; ``` - List all privileges of user @@ -1618,13 +1622,13 @@ eg: REVOKE ALL ON root.** FROM USER user1; #### Delete Time Partition (experimental) ```sql -Eg: IoTDB > DELETE PARTITION root.ln 0,1,2 +Eg: DELETE PARTITION root.ln 0,1,2; ``` #### Continuous Query,CQ ```sql -Eg: IoTDB > CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO temperature_max FROM root.ln.*.* GROUP BY time(10s) END +Eg: CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO temperature_max FROM root.ln.*.* GROUP BY time(10s) END; ``` #### Maintenance Command @@ -1632,42 +1636,42 @@ Eg: IoTDB > CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO - FLUSH ```sql -Eg: IoTDB > flush +Eg: flush; ``` - MERGE ```sql -Eg: IoTDB > MERGE -Eg: IoTDB > FULL MERGE +Eg: MERGE; +Eg: FULL MERGE; ``` - CLEAR CACHE ```sql -Eg: IoTDB > CLEAR CACHE +Eg: CLEAR CACHE; ``` - START REPAIR DATA ```sql -Eg: IoTDB > START REPAIR DATA +Eg: START REPAIR DATA; ``` - STOP REPAIR DATA ```sql -Eg: IoTDB > STOP REPAIR DATA +Eg: STOP REPAIR DATA; ``` - SET SYSTEM TO READONLY / WRITABLE ```sql -Eg: IoTDB > SET SYSTEM TO READONLY / WRITABLE +Eg: SET SYSTEM TO READONLY / WRITABLE; ``` - Query abort ```sql -Eg: IoTDB > KILL QUERY 1 +Eg: KILL QUERY 1; ``` \ No newline at end of file diff --git a/src/UserGuide/latest-Table/Basic-Concept/Table-Management_apache.md b/src/UserGuide/latest-Table/Basic-Concept/Table-Management_apache.md index 1aacb9d3b..4b9a9c558 100644 --- a/src/UserGuide/latest-Table/Basic-Concept/Table-Management_apache.md +++ b/src/UserGuide/latest-Table/Basic-Concept/Table-Management_apache.md @@ -127,14 +127,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? 
**Examples:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -162,7 +167,9 @@ Used to view column names, data types, categories, and states of a table. **Examples:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -177,8 +184,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -215,7 +225,9 @@ SHOW CREATE TABLE **Example:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -232,12 +244,15 @@ Used to update a table, including adding or deleting columns and configuring tab **Syntax:** ```SQL -ALTER TABLE (IF EXISTS)? 
tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition; +#dropColumn; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **Note::** @@ -249,11 +264,11 @@ ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? col **Example:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 1.6 Delete Tables @@ -269,6 +284,6 @@ DROP TABLE (IF EXISTS)? 
**Examples:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` \ No newline at end of file diff --git a/src/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md b/src/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md index 112f90f6a..e42212e73 100644 --- a/src/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md +++ b/src/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md @@ -127,14 +127,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? **Examples:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -162,7 +167,9 @@ Used to view column names, data types, categories, and states of a table. 
**Examples:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -177,8 +184,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -214,7 +224,9 @@ SHOW CREATE TABLE **Example:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -231,12 +243,15 @@ Used to update a table, including adding or deleting columns and configuring tab **Syntax:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
column=columnDefinition; +#dropColumn; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **Note::** @@ -248,11 +263,11 @@ ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? col **Example:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 1.6 Delete Tables @@ -268,6 +283,6 @@ DROP TABLE (IF EXISTS)? **Examples:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` \ No newline at end of file diff --git a/src/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_apache.md b/src/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_apache.md index 193fa718b..729c38c39 100644 --- a/src/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_apache.md +++ b/src/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_apache.md @@ -110,7 +110,9 @@ try (ITableSession session = After execution, you can verify the table creation using the following command: ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +-----------+---------+-----------+ | ColumnName| DataType| Category| +-----------+---------+-----------+ @@ -131,9 +133,9 @@ It is possible to insert data for specific columns. 
Columns not specified will r **Example:** ```SQL -INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1) +INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:38:00', 91.0) +INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:38:00', 91.0); ``` ### 1.4 Null Value Insertion @@ -145,10 +147,10 @@ You can explicitly set `null` values for tag columns, attribute columns, and fie Equivalent to the above partial column insertion. ```SQL -# Equivalent to the example above -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1) +# Equivalent to the example above; +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null) +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null); ``` If no tag columns are included, the system will automatically create a device with all tag column values set to `null`. 
@@ -165,13 +167,13 @@ IoTDB supports inserting multiple rows of data in a single statement to improve INSERT INTO table1 VALUES ('2025-11-26 13:37:00', 'Frankfurt', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('2025-11-26 13:38:00', 'Frankfurt', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25') +('2025-11-26 13:38:00', 'Frankfurt', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25'); INSERT INTO table1 (region, plant_id, device_id, model_id, maintenance, time, temperature, humidity, status, arrival_time) VALUES ('Frankfurt', '1001', '100', 'A', '180', '2025-11-26 13:37:00', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('Frankfurt', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25') +('Frankfurt', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25'); ``` #### Notes @@ -201,7 +203,7 @@ Using the [sample data](../Reference/Sample-Data.md) as the data source, first c sql ```sql -IoTDB:database1> CREATE TABLE target_table ( time TIMESTAMP TIME, region STRING TAG, device_id STRING TAG, temperature FLOAT FIELD ); +CREATE TABLE target_table ( time TIMESTAMP TIME, region STRING TAG, device_id STRING TAG, temperature FLOAT FIELD ); Msg: The statement is executed successfully. ``` @@ -214,9 +216,13 @@ The `query` part is a direct `select ... from ...` query. sql ```sql -IoTDB:database1> insert into target_table select time,region,device_id,temperature from table1 where region = 'Beijing' +insert into target_table select time,region,device_id,temperature from table1 where region = 'Beijing'; Msg: The statement is executed successfully. 
-IoTDB:database1> select * from target_table where region='Beijing' +``` +```sql +select * from target_table where region='Beijing'; +``` +```shell +-----------------------------+--------+-----------+-------------+ | time| region| device_id| temperature| +-----------------------------+--------+-----------+-------------+ @@ -243,9 +249,13 @@ The `query` part uses the table reference syntax `table source_table`. sql ```sql -IoTDB:database1> insert into target_table(time,device_id,temperature) table table3 +insert into target_table(time,device_id,temperature) table table3; Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region is null +``` +```sql +select * from target_table where region is null; +``` +```shell +-----------------------------+------+-----------+-------------+ | time|region| device_id| temperature| +-----------------------------+------+-----------+-------------+ @@ -270,9 +280,13 @@ The `query` part is a parenthesized subquery. sql ```sql -IoTDB:database1> insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = 'Shanghai')) +insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = 'Shanghai')); Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region = 'Shanghai' +``` +```sql +select * from target_table where region = 'Shanghai'; +``` +```shell +-----------------------------+---------+-----------+-------------+ | time| region| device_id| temperature| +-----------------------------+---------+-----------+-------------+ @@ -339,13 +353,13 @@ INSERT INTO table1(time, device_id, s1) VALUES(NOW(), 'tag1', TO_OBJECT(TRUE, 0, 2. 
**Segmented write** ```SQL --- First write: TO_OBJECT(FALSE, 0, X'696F') +-- First write: TO_OBJECT(FALSE, 0, X'696F'); INSERT INTO table1(time, device_id, s1) VALUES(1, 'tag1', TO_OBJECT(FALSE, 0, X'696F')); --- Second write: TO_OBJECT(FALSE, 2, X'7464') +-- Second write: TO_OBJECT(FALSE, 2, X'7464'); INSERT INTO table1(time, device_id, s1) VALUES(1, 'tag1', TO_OBJECT(FALSE, 2, X'7464')); --- Third write: TO_OBJECT(TRUE, 4, X'62') +-- Third write: TO_OBJECT(TRUE, 4, X'62'); INSERT INTO table1(time, device_id, s1) VALUES(1, 'tag1', TO_OBJECT(TRUE, 4, X'62')); ``` @@ -379,5 +393,5 @@ updateAssignment **Example**: ```SQL -update table1 set b = a where substring(a, 1, 1) like '%' +update table1 set b = a where substring(a, 1, 1) like '%'; ``` diff --git a/src/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md b/src/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md index ef146a655..e6055f40d 100644 --- a/src/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md +++ b/src/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md @@ -110,7 +110,9 @@ try (ITableSession session = After execution, you can verify the table creation using the following command: ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +-----------+---------+-----------+ | ColumnName| DataType| Category| +-----------+---------+-----------+ @@ -131,9 +133,9 @@ It is possible to insert data for specific columns. 
Columns not specified will r **Example:** ```SQL -INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1) +INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:38:00', 91.0) +INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('Hamburg', '1001', '100', '2025-11-26 13:38:00', 91.0); ``` ### 1.4 Null Value Insertion @@ -145,10 +147,10 @@ You can explicitly set `null` values for tag columns, attribute columns, and fie Equivalent to the above partial column insertion. ```SQL -# Equivalent to the example above -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1) +# Equivalent to the example above; +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null) +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('Hamburg', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null); ``` If no tag columns are included, the system will automatically create a device with all tag column values set to `null`. 
@@ -165,13 +167,13 @@ IoTDB supports inserting multiple rows of data in a single statement to improve INSERT INTO table1 VALUES ('2025-11-26 13:37:00', 'Frankfurt', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('2025-11-26 13:38:00', 'Frankfurt', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25') +('2025-11-26 13:38:00', 'Frankfurt', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25'); INSERT INTO table1 (region, plant_id, device_id, model_id, maintenance, time, temperature, humidity, status, arrival_time) VALUES ('Frankfurt', '1001', '100', 'A', '180', '2025-11-26 13:37:00', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('Frankfurt', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25') +('Frankfurt', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25'); ``` #### Notes @@ -201,7 +203,7 @@ Using the [sample data](../Reference/Sample-Data.md) as the data source, first c sql ```sql -IoTDB:database1> CREATE TABLE target_table ( time TIMESTAMP TIME, region STRING TAG, device_id STRING TAG, temperature FLOAT FIELD ); +CREATE TABLE target_table ( time TIMESTAMP TIME, region STRING TAG, device_id STRING TAG, temperature FLOAT FIELD ); Msg: The statement is executed successfully. ``` @@ -214,9 +216,13 @@ The `query` part is a direct `select ... from ...` query. sql ```sql -IoTDB:database1> insert into target_table select time,region,device_id,temperature from table1 where region = 'Beijing' +insert into target_table select time,region,device_id,temperature from table1 where region = 'Beijing'; Msg: The statement is executed successfully. 
-IoTDB:database1> select * from target_table where region='Beijing' +``` +```sql +select * from target_table where region='Beijing'; +``` +```shell +-----------------------------+--------+-----------+-------------+ | time| region| device_id| temperature| +-----------------------------+--------+-----------+-------------+ @@ -243,9 +249,13 @@ The `query` part uses the table reference syntax `table source_table`. sql ```sql -IoTDB:database1> insert into target_table(time,device_id,temperature) table table3 +insert into target_table(time,device_id,temperature) table table3; Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region is null +``` +```sql +select * from target_table where region is null; +``` +```shell +-----------------------------+------+-----------+-------------+ | time|region| device_id| temperature| +-----------------------------+------+-----------+-------------+ @@ -270,9 +280,13 @@ The `query` part is a parenthesized subquery. sql ```sql -IoTDB:database1> insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = 'Shanghai')) +insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = 'Shanghai')); Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region = 'Shanghai' +``` +```sql +select * from target_table where region = 'Shanghai'; +``` +```shell +-----------------------------+---------+-----------+-------------+ | time| region| device_id| temperature| +-----------------------------+---------+-----------+-------------+ @@ -339,13 +353,13 @@ INSERT INTO table1(time, device_id, s1) VALUES(NOW(), 'tag1', TO_OBJECT(TRUE, 0, 2. 
**Segmented write** ```SQL --- First write: TO_OBJECT(FALSE, 0, X'696F') +-- First write: TO_OBJECT(FALSE, 0, X'696F'); INSERT INTO table1(time, device_id, s1) VALUES(1, 'tag1', TO_OBJECT(FALSE, 0, X'696F')); --- Second write: TO_OBJECT(FALSE, 2, X'7464') +-- Second write: TO_OBJECT(FALSE, 2, X'7464'); INSERT INTO table1(time, device_id, s1) VALUES(1, 'tag1', TO_OBJECT(FALSE, 2, X'7464')); --- Third write: TO_OBJECT(TRUE, 4, X'62') +-- Third write: TO_OBJECT(TRUE, 4, X'62'); INSERT INTO table1(time, device_id, s1) VALUES(1, 'tag1', TO_OBJECT(TRUE, 4, X'62')); ``` @@ -378,5 +392,5 @@ updateAssignment **Example**: ```SQL -update table1 set b = a where substring(a, 1, 1) like '%' +update table1 set b = a where substring(a, 1, 1) like '%'; ``` diff --git a/src/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_apache.md b/src/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_apache.md index 00cb489d3..2246ab609 100644 --- a/src/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_apache.md +++ b/src/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_apache.md @@ -39,7 +39,7 @@ CREATE DATABASE (IF NOT EXISTS)? (WITH properties)? CREATE DATABASE database1; CREATE DATABASE IF NOT EXISTS database1; --- Create database with 1-year TTL +-- Create database with 1-year TTL; CREATE DATABASE IF NOT EXISTS database1 with(TTL=31536000000); ``` @@ -54,7 +54,7 @@ USE **Examples:** ```SQL -USE database1 +USE database1; ``` ### 1.3 View Current Database @@ -62,22 +62,26 @@ USE database1 **Syntax:** ```SQL -SHOW CURRENT_DATABASE +SHOW CURRENT_DATABASE; ``` **Examples:** ```SQL -IoTDB> SHOW CURRENT_DATABASE; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ | null| +---------------+ - -IoTDB> USE database1; - -IoTDB> SHOW CURRENT_DATABASE; +``` +```sql +USE database1; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ @@ -98,15 +102,20 @@ SHOW DATABASES (DETAILS)? 
**Examples:** ```SQL -IoTDB> show databases +show databases; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval| +------------------+-------+-----------------------+---------------------+---------------------+ | database1| INF| 1| 1| 604800000| |information_schema| INF| null| null| null| +------------------+-------+-----------------------+---------------------+---------------------+ - -IoTDB> show databases details +``` +```sql +show databases details; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|DataRegionGroupNum| +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ @@ -140,7 +149,7 @@ DROP DATABASE (IF EXISTS)? **Examples:** ```SQL -DROP DATABASE IF EXISTS database1 +DROP DATABASE IF EXISTS database1; ``` ## 2. Table Management @@ -218,14 +227,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? 
**Examples:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -244,7 +258,9 @@ IoTDB> show tables details from database1 **Examples:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -259,8 +275,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -289,7 +308,9 @@ SHOW CREATE TABLE **Examples:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -305,22 +326,25 @@ Total line number = 1 **Syntax:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
column=columnDefinition COMMENT 'column_comment' #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment'; +#dropColumn; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **Examples:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 2.6 Drop Table @@ -334,8 +358,8 @@ DROP TABLE (IF EXISTS)? 
**Examples:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` diff --git a/src/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md b/src/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md index 5c68a9b43..7de333850 100644 --- a/src/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md +++ b/src/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md @@ -39,7 +39,7 @@ CREATE DATABASE (IF NOT EXISTS)? (WITH properties)? CREATE DATABASE database1; CREATE DATABASE IF NOT EXISTS database1; --- Create database with 1-year TTL +-- Create database with 1-year TTL; CREATE DATABASE IF NOT EXISTS database1 with(TTL=31536000000); ``` @@ -54,7 +54,7 @@ USE **Examples:** ```SQL -USE database1 +USE database1; ``` ### 1.3 View Current Database @@ -62,22 +62,26 @@ USE database1 **Syntax:** ```SQL -SHOW CURRENT_DATABASE +SHOW CURRENT_DATABASE; ``` **Examples:** ```SQL -IoTDB> SHOW CURRENT_DATABASE; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ | null| +---------------+ - -IoTDB> USE database1; - -IoTDB> SHOW CURRENT_DATABASE; +``` +```sql +USE database1; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ @@ -98,15 +102,20 @@ SHOW DATABASES (DETAILS)? 
**Examples:** ```SQL -IoTDB> show databases +show databases; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval| +------------------+-------+-----------------------+---------------------+---------------------+ | database1| INF| 1| 1| 604800000| |information_schema| INF| null| null| null| +------------------+-------+-----------------------+---------------------+---------------------+ - -IoTDB> show databases details +``` +```sql +show databases details; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|DataRegionGroupNum| +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ @@ -140,7 +149,7 @@ DROP DATABASE (IF EXISTS)? **Examples:** ```SQL -DROP DATABASE IF EXISTS database1 +DROP DATABASE IF EXISTS database1; ``` ## 2. Table Management @@ -218,14 +227,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? 
**Examples:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -244,7 +258,9 @@ IoTDB> show tables details from database1 **Examples:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -259,8 +275,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -289,7 +308,9 @@ SHOW CREATE TABLE **Examples:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -304,22 +325,25 @@ Total line number = 1 **Syntax:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
column=columnDefinition COMMENT 'column_comment' #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment'; +#dropColumn; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **Examples:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 2.6 Drop Table @@ -333,8 +357,8 @@ DROP TABLE (IF EXISTS)? 
**Examples:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` diff --git a/src/UserGuide/latest/Basic-Concept/Operate-Metadata_apache.md b/src/UserGuide/latest/Basic-Concept/Operate-Metadata_apache.md index d4c6e51dd..10cf8b1ca 100644 --- a/src/UserGuide/latest/Basic-Concept/Operate-Metadata_apache.md +++ b/src/UserGuide/latest/Basic-Concept/Operate-Metadata_apache.md @@ -27,9 +27,9 @@ According to the storage model we can set up the corresponding database. Two SQL statements are supported for creating databases, as follows: -``` -IoTDB > create database root.ln -IoTDB > create database root.sgcc +```sql +create database root.ln; +create database root.sgcc; ``` We can thus create two databases using the above two SQL statements. @@ -38,11 +38,11 @@ It is worth noting that 1 database is recommended. When the path itself or the parent/child layer of the path is already created as database, the path is then not allowed to be created as database. For example, it is not feasible to create `root.ln.wf01` as database when two databases `root.ln` and `root.sgcc` exist. The system gives the corresponding error prompt as shown below: -``` -IoTDB> CREATE DATABASE root.ln.wf01 -Msg: 300: root.ln has already been created as database. -IoTDB> create database root.ln.wf01 -Msg: 300: root.ln has already been created as database. +```sql +CREATE DATABASE root.ln.wf01; +Msg: 300: root.ln has already been created as database. +create database root.ln.wf01; +Msg: 300: root.ln has already been created as database. ``` Database Node Naming Rules: @@ -59,9 +59,9 @@ Besides, if deploy on Windows system, the LayerName is case-insensitive, which m After creating the database, we can use the [SHOW DATABASES](../SQL-Manual/SQL-Manual_apache) statement and [SHOW DATABASES \](../SQL-Manual/SQL-Manual_apache) to view the databases.
The SQL statements are as follows: -``` -IoTDB> SHOW DATABASES -IoTDB> SHOW DATABASES root.** +```sql +SHOW DATABASES; +SHOW DATABASES root.**; ``` The result is as follows: @@ -81,11 +81,11 @@ It costs 0.060s User can use the `DELETE DATABASE ` statement to delete all databases matching the pathPattern. Please note the data in the database will also be deleted. -``` -IoTDB > DELETE DATABASE root.ln -IoTDB > DELETE DATABASE root.sgcc -// delete all data, all timeseries and all databases -IoTDB > DELETE DATABASE root.** +```sql +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +// delete all data, all timeseries and all databases; +DELETE DATABASE root.**; ``` ### 1.4 Count Databases @@ -94,11 +94,11 @@ User can use the `COUNT DATABASE ` statement to count the number of SQL statement is as follows: -``` -IoTDB> count databases -IoTDB> count databases root.* -IoTDB> count databases root.sgcc.* -IoTDB> count databases root.sgcc +```sql +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` The result is as follows: @@ -176,13 +176,13 @@ Users can set any heterogeneous parameters when creating a Database, or adjust s The user can set any of the above heterogeneous parameters when creating a Database. The SQL statement is as follows: -``` +```sql CREATE DATABASE prefixPath (WITH databaseAttributeClause (COMMA? databaseAttributeClause)*)? ``` For example: -``` +```sql CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTOR=3, SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -190,13 +190,13 @@ CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTO Users can adjust some heterogeneous parameters during the IoTDB runtime, as shown in the following SQL statement: -``` +```sql ALTER DATABASE prefixPath WITH databaseAttributeClause (COMMA? 
databaseAttributeClause)* ``` For example: -``` +```sql ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -209,14 +209,14 @@ Note that only the following heterogeneous parameters can be adjusted at runtime The user can query the specific heterogeneous configuration of each Database, and the SQL statement is as follows: -``` +```sql SHOW DATABASES DETAILS prefixPath? ``` For example: -``` -IoTDB> SHOW DATABASES DETAILS +```sql +SHOW DATABASES DETAILS +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ |Database| TTL|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|MinSchemaRegionGroupNum|MaxSchemaRegionGroupNum|DataRegionGroupNum|MinDataRegionGroupNum|MaxDataRegionGroupNum| +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ @@ -278,7 +278,7 @@ The set ttl operation can be understood as setting a TTL rule, for example, sett The unset ttl operation indicates unmounting TTL for the corresponding path pattern; if there is no corresponding TTL, nothing will be done. If you want to set TTL to be infinitely large, you can use the INF keyword. The SQL Statement for setting TTL is as follow: -``` +```sql set ttl to pathPattern 360000; ``` Set the Time to Live (TTL) to a pathPattern of 360,000 milliseconds; the pathPattern should not contain a wildcard (\*) in the middle and must end with a double asterisk (\*\*). The pathPattern is used to match corresponding devices. @@ -289,25 +289,25 @@ It is also permissible to specify a particular device without a wildcard (*). 
To unset TTL, we can use following SQL statement: -``` -IoTDB> unset ttl from root.ln +```sql +unset ttl from root.ln ``` After unset TTL, all data will be accepted in `root.ln`. -``` -IoTDB> unset ttl from root.sgcc.** +```sql +unset ttl from root.sgcc.** ``` Unset the TTL in the `root.sgcc` path. New syntax -``` -IoTDB> unset ttl from root.** +```sql +unset ttl from root.** ``` Old syntax -``` -IoTDB> unset ttl to root.** +```sql +unset ttl to root.** ``` There is no functional difference between the old and new syntax, and they are compatible with each other. The new syntax is just more conventional in terms of wording. @@ -320,8 +320,8 @@ To Show TTL, we can use following SQL statement: show all ttl -``` -IoTDB> SHOW ALL TTL +```sql +SHOW ALL TTL +--------------+--------+ | path| TTL| | root.**|55555555| @@ -330,8 +330,8 @@ IoTDB> SHOW ALL TTL ``` show ttl on pathPattern -``` -IoTDB> SHOW TTL ON root.db.**; +```sql +SHOW TTL ON root.db.**; +--------------+--------+ | path| TTL| | root.db.**|55555555| @@ -343,8 +343,8 @@ The SHOW ALL TTL example gives the TTL for all path patterns. The SHOW TTL ON pathPattern shows the TTL for the path pattern specified. Display devices' ttl -``` -IoTDB> show devices +```sql +show devices +---------------+---------+---------+ | Device|IsAligned| TTL| +---------------+---------+---------+ @@ -372,22 +372,22 @@ CREATE DEVICE TEMPLATE ALIGNED? '(' create device template t1 (temperature FLOAT, status BOOLEAN) +```sql +create device template t1 (temperature FLOAT, status BOOLEAN) ``` **Example 2:** Create a template containing a group of aligned timeseries -```shell -IoTDB> create device template t2 aligned (lat FLOAT, lon FLOAT) +```sql +create device template t2 aligned (lat FLOAT, lon FLOAT) ``` The `lat` and `lon` measurements are aligned. When creating a template, the system will automatically assign default encoding and compression methods, requiring no manual specification.
If your business scenario requires custom adjustments, you may refer to the following example: -```shell -IoTDB> create device template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) +```sql +create device template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) ``` For a full list of supported data types and corresponding encoding methods, please refer to [Compression & Encoding](../Technical-Insider/Encoding-and-Compression.md). @@ -404,8 +404,8 @@ After a device template is created, it should be set to specific path before cre The SQL Statement for setting device template is as follows: -```shell -IoTDB> set device template t1 to root.sg1.d1 +```sql +set device template t1 to root.sg1.d1 ``` ### 2.3 Activate Device Template @@ -415,17 +415,17 @@ After setting the device template, with the system enabled to auto create schema **Attention**: Before inserting data or the system not enabled to auto create schema, timeseries defined by the device template will not be created.
You can use the following SQL statement to create the timeseries or activate the device template before inserting data: -```shell -IoTDB> create timeseries using device template on root.sg1.d1 +```sql +create timeseries using device template on root.sg1.d1 ``` **Example:** Execute the following statement -```shell -IoTDB> set device template t1 to root.sg1.d1 -IoTDB> set device template t2 to root.sg1.d2 -IoTDB> create timeseries using device template on root.sg1.d1 -IoTDB> create timeseries using device template on root.sg1.d2 +```sql +set device template t1 to root.sg1.d1; +set device template t2 to root.sg1.d2; +create timeseries using device template on root.sg1.d1; +create timeseries using device template on root.sg1.d2; ``` Show the time series: @@ -434,7 +434,7 @@ Show the time series: show timeseries root.sg1.** ```` -```shell +```sql +-----------------------+-----+-------------+--------+--------+-----------+----+----------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression|tags|attributes|deadband|deadband parameters| +-----------------------+-----+-------------+--------+--------+-----------+----+----------+--------+-------------------+ @@ -451,7 +451,7 @@ Show the devices: show devices root.sg1.** ```` -```shell +```sql +---------------+---------+ | devices|isAligned| +---------------+---------+ @@ -466,13 +466,13 @@ The SQL statement looks like this: -```shell -IoTDB> show device templates +```sql +show device templates ``` The execution result is as follows: -```shell +```sql +-------------+ |template name| +-------------+ @@ -485,13 +485,13 @@ The execution result is as follows: The SQL statement looks like this: -```shell -IoTDB> show nodes in device template t1 +```sql +show nodes in device template t1 ``` The execution result is as follows: -```shell +```sql +-----------+--------+--------+-----------+ |child nodes|dataType|encoding|compression|
+-----------+--------+--------+-----------+ @@ -502,13 +502,13 @@ The execution result is as follows: - Show the path prefix where a device template is set -```shell -IoTDB> show paths set device template t1 +```sql +show paths set device template t1 ``` The execution result is as follows: -```shell +```sql +-----------+ |child paths| +-----------+ @@ -518,13 +518,13 @@ The execution result is as follows: - Show the path prefix where a device template is used (i.e. the time series has been created) -```shell -IoTDB> show paths using device template t1 +```sql +show paths using device template t1 ``` The execution result is as follows: -```shell +```sql +-----------+ |child paths| +-----------+ @@ -536,26 +536,26 @@ The execution result is as follows: To delete a group of timeseries represented by device template, namely deactivate the device template, use the following SQL statement: -```shell -IoTDB> delete timeseries of device template t1 from root.sg1.d1 +```sql +delete timeseries of device template t1 from root.sg1.d1 ``` or -```shell -IoTDB> deactivate device template t1 from root.sg1.d1 +```sql +deactivate device template t1 from root.sg1.d1 ``` The deactivation supports batch process. -```shell -IoTDB> delete timeseries of device template t1 from root.sg1.*, root.sg2.* +```sql +delete timeseries of device template t1 from root.sg1.*, root.sg2.* ``` or -```shell -IoTDB> deactivate device template t1 from root.sg1.*, root.sg2.* +```sql +deactivate device template t1 from root.sg1.*, root.sg2.* ``` If the template name is not provided in sql, all template activation on paths matched by given path pattern will be removed. 
@@ -564,8 +564,8 @@ If the template name is not provided in sql, all template activation on paths ma The SQL Statement for unsetting device template is as follow: -```shell -IoTDB> unset device template t1 from root.sg1.d1 +```sql +unset device template t1 from root.sg1.d1 ``` **Attention**: It should be guaranteed that none of the timeseries represented by the target device template exists, before unset it. It can be achieved by deactivation operation. @@ -574,8 +574,8 @@ IoTDB> unset device template t1 from root.sg1.d1 The SQL Statement for dropping device template is as follow: -```shell -IoTDB> drop device template t1 +```sql +drop device template t1 ``` **Attention**: Dropping an already set template is not supported. @@ -586,8 +586,8 @@ In a scenario where measurements need to be added, you can modify the template The SQL Statement for altering device template is as follow: -```shell -IoTDB> alter device template t1 add (speed FLOAT) +```sql +alter device template t1 add (speed FLOAT) ``` **When executing data insertion to devices with device template set on related prefix path and there are measurements not present in this device template, the measurements will be auto added to this device template.** @@ -598,36 +598,36 @@ IoTDB> alter device template t1 add (speed FLOAT) According to the storage model selected before, we can create corresponding timeseries in the two databases respectively. 
The SQL statements for creating timeseries are as follows: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +```sql +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` From v0.13, you can use a simplified version of the SQL statements to create timeseries: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT +```sql +create timeseries root.ln.wf01.wt01.status BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature FLOAT; +create timeseries root.ln.wf02.wt02.hardware TEXT; +create timeseries root.ln.wf02.wt02.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature FLOAT; ``` When creating a timeseries, the system will automatically assign default encoding and compression methods, requiring no manual specification. 
If your business scenario requires custom adjustments, you may refer to the following example: -```shell -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY +```sql +create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY ``` Note that if you manually specify an encoding method that is incompatible with the data type, the system will return an error message, as shown below: -``` -IoTDB > create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF +```sql +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF; error: encoding TS_2DIFF does not support BOOLEAN ``` @@ -637,8 +637,8 @@ For a full list of supported data types and corresponding encoding methods, plea The SQL statement for creating a group of timeseries are as follows: -``` -IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) +```sql +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) ``` You can set different datatype, encoding, and compression for the timeseries in a group of aligned timeseries @@ -651,11 +651,11 @@ To delete the timeseries we created before, we are able to use `(DELETE | DROP) The usage are as follows: -``` -IoTDB> delete timeseries root.ln.wf01.wt01.status -IoTDB> delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -IoTDB> delete timeseries root.ln.wf02.* -IoTDB> drop timeseries root.ln.wf02.* +```sql +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` ### 3.4 Show Timeseries @@ -676,9 +676,9 @@ Examples: returns all timeseries information matching the given <`PathPattern`>. 
SQL statements are as follows: -``` -IoTDB> show timeseries root.** -IoTDB> show timeseries root.ln.** +```sql +show timeseries root.**; +show timeseries root.ln.**; ``` The results are shown below respectively: @@ -714,7 +714,7 @@ It costs 0.004s returns all the timeseries information start from the offset and limit the number of series returned. For example, -``` +```sql show timeseries root.ln.** limit 10 offset 10 ``` @@ -722,7 +722,7 @@ show timeseries root.ln.** limit 10 offset 10 The query result set is filtered by string fuzzy matching based on the names of the timeseries. For example: -``` +```sql show timeseries root.ln.** where timeseries contains 'wf01.wt' ``` @@ -743,7 +743,7 @@ It costs 0.016s The query result set is filtered by data type. For example: -``` +```sql show timeseries root.ln.** where dataType=FLOAT ``` @@ -767,9 +767,9 @@ It costs 0.016s The query result set is filtered by tags. For example: -``` -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` The query results are as follows: @@ -811,16 +811,16 @@ IoTDB is able to use `COUNT TIMESERIES ` to count the number of timeseries * `LEVEL` could be defined to show count the number of timeseries of each node at the given level in current Metadata Tree. This could be used to query the number of sensors under each device. The grammar is: `COUNT TIMESERIES GROUP BY LEVEL=`. 
-``` -IoTDB > COUNT TIMESERIES root.** -IoTDB > COUNT TIMESERIES root.ln.** -IoTDB > COUNT TIMESERIES root.ln.*.*.status -IoTDB > COUNT TIMESERIES root.ln.wf01.wt01.status -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -IoTDB > COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 +```sql +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' ; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' ; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' ; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; ``` For example, if there are several timeseries (use `show timeseries` to show all timeseries): @@ -847,10 +847,10 @@ Then the Metadata Tree will be as below: As can be seen, `root` is considered as `LEVEL=0`. 
So when you enter statements such as: -``` -IoTDB > COUNT TIMESERIES root.** GROUP BY LEVEL=1 -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +```sql +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` You will get following results: @@ -897,7 +897,7 @@ The differences between tag and attribute are: The SQL statements for creating timeseries with extra tag and attribute information are extended as follows: -``` +```sql create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) ``` @@ -911,31 +911,31 @@ We can update the tag information after creating it as following: * Rename the tag/attribute key -``` +```sql ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 ``` * Reset the tag/attribute value -``` +```sql ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 ``` * Delete the existing tag/attribute -``` +```sql ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 ``` * Add new tags -``` +```sql ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 ``` * Add new attributes -``` +```sql ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 ``` @@ -943,23 +943,23 @@ ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 > add alias or a new key-value if the alias or key doesn't exist, otherwise, update the old one with new value. -``` +```sql ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4) ``` * Show timeseries using tags. Use TAGS(tagKey) to identify the tags used as filter key -``` +```sql SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause ``` returns all the timeseries information that satisfy the where condition and match the pathPattern. 
SQL statements are as follows: -``` -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` The results are shown below respectly: @@ -984,23 +984,23 @@ It costs 0.004s - count timeseries using tags -``` -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +```sql +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL=; ``` returns all the number of timeseries that satisfy the where condition and match the pathPattern. 
SQL statements are as follows: -``` -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +```sql +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` The results are shown below respectly : ``` -IoTDB> count timeseries +count timeseries +-----------------+ |count(timeseries)| +-----------------+ @@ -1008,7 +1008,7 @@ IoTDB> count timeseries +-----------------+ Total line number = 1 It costs 0.019s -IoTDB> count timeseries root.** where TAGS(unit)='c' +count timeseries root.** where TAGS(unit)='c' +-----------------+ |count(timeseries)| +-----------------+ @@ -1016,7 +1016,7 @@ IoTDB> count timeseries root.** where TAGS(unit)='c' +-----------------+ Total line number = 1 It costs 0.020s -IoTDB> count timeseries root.** where TAGS(unit)='c' group by level = 2 +count timeseries root.** where TAGS(unit)='c' group by level = 2 +--------------+-----------------+ | column|count(timeseries)| +--------------+-----------------+ @@ -1032,14 +1032,14 @@ It costs 0.011s create aligned timeseries -``` +```sql create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) ``` The execution result is as follows: -``` -IoTDB> show timeseries +```sql +show timeseries +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -1050,8 +1050,8 @@ IoTDB> show timeseries Support query: -``` -IoTDB> show timeseries where TAGS(tag1)='v1' +```sql +show timeseries where TAGS(tag1)='v1' 
+--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -1117,7 +1117,7 @@ To make it more convenient and efficient to express multiple time series, IoTDB ### 4.5 Show Child Paths -``` +```sql SHOW CHILD PATHS pathPattern ``` @@ -1145,7 +1145,7 @@ It costs 0.002s ### 4.6 Show Child Nodes -``` +```sql SHOW CHILD NODES pathPattern ``` @@ -1182,11 +1182,11 @@ IoTDB is able to use `COUNT NODES LEVEL=` to count the nu This could be used to query the number of devices with specified measurements. The usage are as follows: -``` -IoTDB > COUNT NODES root.** LEVEL=2 -IoTDB > COUNT NODES root.ln.** LEVEL=2 -IoTDB > COUNT NODES root.ln.wf01.** LEVEL=3 -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +```sql +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.** LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` As for the above mentioned example and Metadata tree, you can get following results: @@ -1239,10 +1239,10 @@ Similar to `Show Timeseries`, IoTDB also supports two ways of viewing devices: SQL statement is as follows: -``` -IoTDB> show devices -IoTDB> show devices root.ln.** -IoTDB> show devices root.ln.** where device contains 't' +```sql +show devices; +show devices root.ln.**; +show devices root.ln.** where device contains 't'; ``` You can get results below: @@ -1279,9 +1279,9 @@ To view devices' information with database, we can use `SHOW DEVICES WITH DATABA SQL statement is as follows: -``` -IoTDB> show devices with database -IoTDB> show devices root.ln.** with database +```sql +show devices with database; +show devices root.ln.** with database; ``` You can get results below: 
@@ -1316,10 +1316,10 @@ The above statement is used to count the number of devices. At the same time, it SQL statement is as follows: -``` -IoTDB> show devices -IoTDB> count devices -IoTDB> count devices root.ln.** +```sql +show devices; +count devices; +count devices root.ln.**; ``` You can get results below: diff --git a/src/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md b/src/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md index 5bc7d549b..d3ec0bfe3 100644 --- a/src/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md +++ b/src/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md @@ -27,9 +27,9 @@ According to the storage model we can set up the corresponding database. Two SQL statements are supported for creating databases, as follows: -``` -IoTDB > create database root.ln -IoTDB > create database root.sgcc +```sql +create database root.ln; +create database root.sgcc; ``` We can thus create two databases using the above two SQL statements. @@ -38,11 +38,11 @@ It is worth noting that 1 database is recommended. When the path itself or the parent/child layer of the path is already created as database, the path is then not allowed to be created as database. For example, it is not feasible to create `root.ln.wf01` as database when two databases `root.ln` and `root.sgcc` exist. The system gives the corresponding error prompt as shown below: -``` -IoTDB> CREATE DATABASE root.ln.wf01 -Msg: 300: root.ln has already been created as database. -IoTDB> create database root.ln.wf01 -Msg: 300: root.ln has already been created as database. 
+```sql +CREATE DATABASE root.ln.wf01; +Msg: 300: root.ln has already been created as database; +create database root.ln.wf01; +Msg: 300: root.ln has already been created as database; ``` Database Node Naming Rules: @@ -59,9 +59,9 @@ Besides, if deploy on Windows system, the LayerName is case-insensitive, which m After creating the database, we can use the [SHOW DATABASES](../SQL-Manual/SQL-Manual_timecho) statement and [SHOW DATABASES \](../SQL-Manual/SQL-Manual_timecho) to view the databases. The SQL statements are as follows: -``` -IoTDB> SHOW DATABASES -IoTDB> SHOW DATABASES root.** +```sql +SHOW DATABASES; +SHOW DATABASES root.**; ``` The result is as follows: @@ -81,11 +81,11 @@ It costs 0.060s User can use the `DELETE DATABASE ` statement to delete all databases matching the pathPattern. Please note the data in the database will also be deleted. -``` -IoTDB > DELETE DATABASE root.ln -IoTDB > DELETE DATABASE root.sgcc -// delete all data, all timeseries and all databases -IoTDB > DELETE DATABASE root.** +```sql +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +// delete all data, all timeseries and all databases; +DELETE DATABASE root.**; ``` ### 1.4 Count Databases @@ -94,11 +94,11 @@ User can use the `COUNT DATABASE ` statement to count the number of SQL statement is as follows: -``` -IoTDB> count databases -IoTDB> count databases root.* -IoTDB> count databases root.sgcc.* -IoTDB> count databases root.sgcc +```sql +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` The result is as follows: @@ -176,13 +176,13 @@ Users can set any heterogeneous parameters when creating a Database, or adjust s The user can set any of the above heterogeneous parameters when creating a Database. The SQL statement is as follows: -``` +```sql CREATE DATABASE prefixPath (WITH databaseAttributeClause (COMMA? databaseAttributeClause)*)? 
``` For example: -``` +```sql CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTOR=3, SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -190,13 +190,13 @@ CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTO Users can adjust some heterogeneous parameters during the IoTDB runtime, as shown in the following SQL statement: -``` -ALTER DATABASE prefixPath WITH databaseAttributeClause (COMMA? databaseAttributeClause)* +```sql +ALTER DATABASE prefixPath WITH databaseAttributeClause (COMMA? databaseAttributeClause)*; ``` For example: -``` +```sql ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -209,14 +209,16 @@ Note that only the following heterogeneous parameters can be adjusted at runtime The user can query the specific heterogeneous configuration of each Database, and the SQL statement is as follows: -``` +```sql SHOW DATABASES DETAILS prefixPath? ``` For example: +```sql +SHOW DATABASES DETAILS +``` ``` -IoTDB> SHOW DATABASES DETAILS +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ |Database| TTL|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|MinSchemaRegionGroupNum|MaxSchemaRegionGroupNum|DataRegionGroupNum|MinDataRegionGroupNum|MaxDataRegionGroupNum| +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ @@ -278,7 +280,8 @@ The set ttl operation can be understood as setting a TTL rule, for example, sett The unset ttl operation indicates unmounting TTL for the corresponding path pattern; if there is no corresponding TTL, nothing will be done. 
If you want to set TTL to be infinitely large, you can use the INF keyword. The SQL Statement for setting TTL is as follow: -``` + +```sql set ttl to pathPattern 360000; ``` Set the Time to Live (TTL) to a pathPattern of 360,000 milliseconds; the pathPattern should not contain a wildcard (\*) in the middle and must end with a double asterisk (\*\*). The pathPattern is used to match corresponding devices. @@ -289,25 +292,28 @@ It is also permissible to specify a particular device without a wildcard (*). To unset TTL, we can use follwing SQL statement: -``` -IoTDB> unset ttl from root.ln +```sql +unset ttl from root.ln ``` After unset TTL, all data will be accepted in `root.ln`. -``` -IoTDB> unset ttl from root.sgcc.** + +```sql +unset ttl from root.sgcc.** ``` Unset the TTL in the `root.sgcc` path. New syntax -``` -IoTDB> unset ttl from root.** + +```sql +unset ttl from root.** ``` Old syntax -``` -IoTDB> unset ttl to root.** + +```sql +unset ttl to root.** ``` There is no functional difference between the old and new syntax, and they are compatible with each other. The new syntax is just more conventional in terms of wording. @@ -320,8 +326,10 @@ To Show TTL, we can use following SQL statement: show all ttl +```sql +SHOW ALL TTL; +``` ``` -IoTDB> SHOW ALL TTL +--------------+--------+ | path| TTL| | root.**|55555555| @@ -330,8 +338,10 @@ IoTDB> SHOW ALL TTL ``` show ttl on pathPattern +```sql +SHOW TTL ON root.db.**; +``` ``` -IoTDB> SHOW TTL ON root.db.**; +--------------+--------+ | path| TTL| | root.db.**|55555555| @@ -343,8 +353,10 @@ The SHOW ALL TTL example gives the TTL for all path patterns. The SHOW TTL ON pathPattern shows the TTL for the path pattern specified. Display devices' ttl +```sql +show devices; +``` ``` -IoTDB> show devices +---------------+---------+---------+ | Device|IsAligned| TTL| +---------------+---------+---------+ @@ -361,36 +373,36 @@ All devices will definitely have a TTL, meaning it cannot be null. 
INF represent According to the storage model selected before, we can create corresponding timeseries in the two databases respectively. The SQL statements for creating timeseries are as follows: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +```sql +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` From v0.13, you can use a simplified version of the SQL statements to create timeseries: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT +```sql +create timeseries root.ln.wf01.wt01.status BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature FLOAT; +create timeseries root.ln.wf02.wt02.hardware TEXT; +create timeseries root.ln.wf02.wt02.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature FLOAT; ``` When creating a timeseries, the system will automatically assign default encoding and compression methods, requiring no 
manual specification. If your business scenario requires custom adjustments, you may refer to the following example: -```shell -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY +```sql +create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY; ``` Note that if you manually specify an encoding method that is incompatible with the data type, the system will return an error message, as shown below: -``` -IoTDB > create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF +```sql +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF; error: encoding TS_2DIFF does not support BOOLEAN ``` @@ -401,8 +413,8 @@ For a full list of supported data types and corresponding encoding methods, plea The SQL statement for creating a group of timeseries are as follows: -``` -IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) +```sql +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ``` You can set different datatype, encoding, and compression for the timeseries in a group of aligned timeseries @@ -415,11 +427,11 @@ To delete the timeseries we created before, we are able to use `(DELETE | DROP) The usage are as follows: -``` -IoTDB> delete timeseries root.ln.wf01.wt01.status -IoTDB> delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -IoTDB> delete timeseries root.ln.wf02.* -IoTDB> drop timeseries root.ln.wf02.* +```sql +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` ### 2.4 Show Timeseries @@ -440,9 +452,9 @@ Examples: returns all timeseries information matching the given <`PathPattern`>. 
SQL statements are as follows: -``` -IoTDB> show timeseries root.** -IoTDB> show timeseries root.ln.** +```sql +show timeseries root.**; +show timeseries root.ln.**; ``` The results are shown below respectively: @@ -478,7 +490,7 @@ It costs 0.004s returns all the timeseries information start from the offset and limit the number of series returned. For example, -``` +```sql show timeseries root.ln.** limit 10 offset 10 ``` @@ -486,7 +498,7 @@ show timeseries root.ln.** limit 10 offset 10 The query result set is filtered by string fuzzy matching based on the names of the timeseries. For example: -``` +```sql show timeseries root.ln.** where timeseries contains 'wf01.wt' ``` @@ -507,7 +519,7 @@ It costs 0.016s The query result set is filtered by data type. For example: -``` +```sql show timeseries root.ln.** where dataType=FLOAT ``` @@ -532,9 +544,9 @@ It costs 0.016s The query result set is filtered by tags. For example: -``` -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` The query results are as follows: @@ -576,16 +588,16 @@ IoTDB is able to use `COUNT TIMESERIES ` to count the number of timeseries * `LEVEL` could be defined to show count the number of timeseries of each node at the given level in current Metadata Tree. This could be used to query the number of sensors under each device. The grammar is: `COUNT TIMESERIES GROUP BY LEVEL=`. 
-``` -IoTDB > COUNT TIMESERIES root.** -IoTDB > COUNT TIMESERIES root.ln.** -IoTDB > COUNT TIMESERIES root.ln.*.*.status -IoTDB > COUNT TIMESERIES root.ln.wf01.wt01.status -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -IoTDB > COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 +```sql +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' ; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' ; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' ; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; ``` For example, if there are several timeseries (use `show timeseries` to show all timeseries): @@ -612,10 +624,10 @@ Then the Metadata Tree will be as below: As can be seen, `root` is considered as `LEVEL=0`. So when you enter statements such as: -``` -IoTDB > COUNT TIMESERIES root.** GROUP BY LEVEL=1 -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +```sql +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` You will get following results: @@ -657,11 +669,11 @@ By adding WHERE time filter conditions to the existing SHOW/COUNT TIMESERIES, we It is important to note that in metadata queries with time filters, views are not considered; only the time series actually stored in the TsFile are taken into account. 
An example usage is as follows: -``` -IoTDB> insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); -IoTDB> insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); -IoTDB> insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); -IoTDB> show timeseries; +```sql +insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); +insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); +insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); +show timeseries; +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ | Timeseries|Alias|Database|DataType|Encoding|Compression|Tags|Attributes|Deadband|DeadbandParameters|ViewType| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ @@ -673,7 +685,7 @@ IoTDB> show timeseries; |root.sg.data2.s2| null| root.sg| FLOAT| GORILLA| LZ4|null| null| null| null| BASE| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ -IoTDB> show timeseries where time >= 15000 and time < 16000; +show timeseries where time >= 15000 and time < 16000; +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ | Timeseries|Alias|Database|DataType|Encoding|Compression|Tags|Attributes|Deadband|DeadbandParameters|ViewType| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ @@ -683,7 +695,7 @@ IoTDB> show timeseries where time >= 15000 and time < 16000; |root.sg.data2.s2| null| root.sg| FLOAT| GORILLA| LZ4|null| null| null| null| BASE| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ -IoTDB> count timeseries where time >= 15000 and time < 16000; +count timeseries where time >= 15000 and time < 16000; +-----------------+ |count(timeseries)| 
+-----------------+ @@ -702,8 +714,8 @@ The differences between tag and attribute are: The SQL statements for creating timeseries with extra tag and attribute information are extended as follows: -``` -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +```sql +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` The `temprature` in the brackets is an alias for the sensor `s1`. So we can use `temprature` to replace `s1` anywhere. @@ -716,31 +728,31 @@ We can update the tag information after creating it as following: * Rename the tag/attribute key -``` +```sql ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 ``` * Reset the tag/attribute value -``` +```sql ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 ``` * Delete the existing tag/attribute -``` +```sql ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 ``` * Add new tags -``` +```sql ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 ``` * Add new attributes -``` +```sql ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 ``` @@ -748,23 +760,23 @@ ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 > add alias or a new key-value if the alias or key doesn't exist, otherwise, update the old one with new value. -``` +```sql ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4) ``` * Show timeseries using tags. Use TAGS(tagKey) to identify the tags used as filter key -``` +```sql SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause ``` returns all the timeseries information that satisfy the where condition and match the pathPattern. 
SQL statements are as follows: -``` -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` The results are shown below respectly: @@ -789,23 +801,23 @@ It costs 0.004s - count timeseries using tags -``` -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +```sql +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL=; ``` returns all the number of timeseries that satisfy the where condition and match the pathPattern. 
SQL statements are as follows: -``` -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +```sql +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` The results are shown below respectly : -``` -IoTDB> count timeseries +```sql +count timeseries; +-----------------+ |count(timeseries)| +-----------------+ @@ -813,7 +825,7 @@ IoTDB> count timeseries +-----------------+ Total line number = 1 It costs 0.019s -IoTDB> count timeseries root.** where TAGS(unit)='c' +count timeseries root.** where TAGS(unit)='c'; +-----------------+ |count(timeseries)| +-----------------+ @@ -821,7 +833,7 @@ IoTDB> count timeseries root.** where TAGS(unit)='c' +-----------------+ Total line number = 1 It costs 0.020s -IoTDB> count timeseries root.** where TAGS(unit)='c' group by level = 2 +count timeseries root.** where TAGS(unit)='c' group by level = 2; +--------------+-----------------+ | column|count(timeseries)| +--------------+-----------------+ @@ -837,14 +849,14 @@ It costs 0.011s create aligned timeseries -``` +```sql create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) ``` The execution result is as follows: -``` -IoTDB> show timeseries +```sql +show timeseries +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -855,8 +867,8 @@ IoTDB> show timeseries Support query: -``` -IoTDB> show timeseries where TAGS(tag1)='v1' +```sql +show timeseries where 
TAGS(tag1)='v1' +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -922,7 +934,7 @@ To make it more convenient and efficient to express multiple time series, IoTDB ### 3.5 Show Child Paths -``` +```sql SHOW CHILD PATHS pathPattern ``` @@ -950,7 +962,7 @@ It costs 0.002s ### 3.6 Show Child Nodes -``` +```sql SHOW CHILD NODES pathPattern ``` @@ -987,11 +999,11 @@ IoTDB is able to use `COUNT NODES LEVEL=` to count the nu This could be used to query the number of devices with specified measurements. The usage are as follows: -``` -IoTDB > COUNT NODES root.** LEVEL=2 -IoTDB > COUNT NODES root.ln.** LEVEL=2 -IoTDB > COUNT NODES root.ln.wf01.** LEVEL=3 -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +```sql +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.** LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` As for the above mentioned example and Metadata tree, you can get following results: @@ -1044,10 +1056,10 @@ Similar to `Show Timeseries`, IoTDB also supports two ways of viewing devices: SQL statement is as follows: -``` -IoTDB> show devices -IoTDB> show devices root.ln.** -IoTDB> show devices root.ln.** where device contains 't' +```sql +show devices; +show devices root.ln.**; +show devices root.ln.** where device contains 't'; ``` You can get results below: @@ -1084,9 +1096,9 @@ To view devices' information with database, we can use `SHOW DEVICES WITH DATABA SQL statement is as follows: -``` -IoTDB> show devices with database -IoTDB> show devices root.ln.** with database +```sql +show devices with database; +show devices root.ln.** with database; ``` You can get 
results below: @@ -1121,10 +1133,10 @@ The above statement is used to count the number of devices. At the same time, it SQL statement is as follows: -``` -IoTDB> show devices -IoTDB> count devices -IoTDB> count devices root.ln.** +```sql +show devices; +count devices; +count devices root.ln.**; ``` You can get results below: @@ -1160,11 +1172,11 @@ It costs 0.004s ### 3.10 Active Device Query Similar to active timeseries query, we can add time filter conditions to device viewing and statistics to query active devices that have data within a certain time range. The definition of active here is the same as for active time series. An example usage is as follows: -``` -IoTDB> insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); -IoTDB> insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); -IoTDB> insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); -IoTDB> show devices; +```sql +insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); +insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); +insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); +show devices; +-------------------+---------+ | devices|isAligned| +-------------------+---------+ @@ -1173,7 +1185,7 @@ IoTDB> show devices; | root.sg.data3| false| +-------------------+---------+ -IoTDB> show devices where time >= 15000 and time < 16000; +show devices where time >= 15000 and time < 16000; +-------------------+---------+ | devices|isAligned| +-------------------+---------+ @@ -1181,7 +1193,7 @@ IoTDB> show devices where time >= 15000 and time < 16000; | root.sg.data2| false| +-------------------+---------+ -IoTDB> count devices where time >= 15000 and time < 16000; +count devices where time >= 15000 and time < 16000; +--------------+ |count(devices)| +--------------+ diff --git a/src/UserGuide/latest/Basic-Concept/Query-Data_apache.md b/src/UserGuide/latest/Basic-Concept/Query-Data_apache.md index 3f50f61cf..5c70722ff 100644 --- 
a/src/UserGuide/latest/Basic-Concept/Query-Data_apache.md +++ b/src/UserGuide/latest/Basic-Concept/Query-Data_apache.md @@ -374,8 +374,10 @@ which means: Query and return the last data points of timeseries prefixPath.path **Example 1:** get the last point of root.ln.wf01.wt01.status: +```sql +select last status from root.ln.wf01.wt01 +``` ``` -IoTDB> select last status from root.ln.wf01.wt01 +-----------------------------+------------------------+-----+--------+ | Time| timeseries|value|dataType| +-----------------------------+------------------------+-----+--------+ @@ -387,8 +389,10 @@ It costs 0.000s **Example 2:** get the last status and temperature points of root.ln.wf01.wt01, whose timestamp larger or equal to 2017-11-07T23:50:00。 +```sql +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +``` ``` -IoTDB> select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -401,8 +405,10 @@ It costs 0.002s **Example 3:** get the last points of all sensor in root.ln.wf01.wt01, and order the result by the timeseries column in descending order +```sql +select last * from root.ln.wf01.wt01 order by timeseries desc; +``` ``` -IoTDB> select last * from root.ln.wf01.wt01 order by timeseries desc; +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -415,8 +421,10 @@ It costs 0.002s **Example 4:** get the last points of all sensor in root.ln.wf01.wt01, and order the result by the dataType column in descending order +```sql +select last * from root.ln.wf01.wt01 order by dataType desc; +``` ``` -IoTDB> select last * from root.ln.wf01.wt01 order by dataType desc; 
+-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -429,8 +437,10 @@ It costs 0.002s **Note:** The requirement to query the latest data point with other filtering conditions can be implemented through function composition. For example: +```sql +select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device +``` ``` -IoTDB> select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device +-----------------+---------------------+----------------+-----------------------+------------------+ | Device|max_time(temperature)|max_time(status)|last_value(temperature)|last_value(status)| +-----------------+---------------------+----------------+-----------------------+------------------+ @@ -547,8 +557,10 @@ In the value filter condition, for TEXT type data, use `Like` and `Regexp` opera **Example 1:** Query data containing `'cc'` in `value` under `root.sg.d1`. +```sql +select * from root.sg.d1 where value like '%cc%' +``` ``` -IoTDB> select * from root.sg.d1 where value like '%cc%' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -561,8 +573,10 @@ It costs 0.002s **Example 2:** Query data that consists of 3 characters and the second character is `'b'` in `value` under `root.sg.d1`. 
+```sql +select * from root.sg.device where value like '_b_' +``` ``` -IoTDB> select * from root.sg.device where value like '_b_' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -587,8 +601,10 @@ Beginning with a: ^a.* **Example 1:** Query a string composed of 26 English characters for the value under root.sg.d1 +```sql +select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +``` ``` -IoTDB> select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -601,8 +617,10 @@ It costs 0.002s **Example 2:** Query root.sg.d1 where the value value is a string composed of 26 lowercase English characters and the time is greater than 100 +```sql +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +``` ``` -IoTDB> select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -1607,16 +1625,16 @@ you can use the `HAVING` clause after the `GROUP BY` clause. > The following usages are incorrect: > > ```sql -> select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 -> select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 +> select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; +> select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; > ``` > > 2.When filtering the `GROUP BY LEVEL` result, the PATH in `SELECT` and `HAVING` can only have one node. 
> The following usages are incorrect: > > ```sql -> select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 -> select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 +> select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; +> select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; > ``` Here are a few examples of using the 'HAVING' clause to filter aggregate results. @@ -2727,8 +2745,10 @@ For examples: - **Example 1** (aligned by time) -```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +```sql +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +``` +``` +--------------+-------------------+--------+ | source column| target timeseries| written| +--------------+-------------------+--------+ @@ -2757,8 +2777,10 @@ We can see that the writing of the `INTO` clause is very flexible as long as the - **Example 2** (aligned by time) -```shell -IoTDB> select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +```sql +select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +``` +``` +--------------------------------------+-------------------------+--------+ | source column| target timeseries| written| +--------------------------------------+-------------------------+--------+ @@ -2774,8 +2796,10 @@ This statement stores the results of an aggregated query into the specified time - **Example 3** (aligned by device) -```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +```sql +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +``` 
+``` +--------------+--------------+-------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+-------------------+--------+ @@ -2797,8 +2821,10 @@ This statement also writes the query results of the four time series under the ` - **Example 4** (aligned by device) -```shell -IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +```sql +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +``` +``` +--------------+--------------+------------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+------------------------+--------+ @@ -2958,8 +2984,10 @@ This statement specifies that `root.sg_copy.d1` is an unaligned device and `root ETL the original data and write a new time series. -```shell -IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEIVCE; +```sql +SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEVICE; +``` +``` +--------------+-------------------+---------------------------+--------+ | source device| source column| target timeseries| written| +--------------+-------------------+---------------------------+--------+ @@ -2977,8 +3005,10 @@ IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) Persistently store the query results, which acts like a materialized view.
-```shell -IOTDB > SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +```sql +SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +``` +``` +--------------------------+-----------------------------+--------+ | source column| target timeseries| written| +--------------------------+-----------------------------+--------+ @@ -2996,8 +3026,10 @@ Rewrite non-aligned time series into another aligned time series. **Note:** It is recommended to use the `LIMIT & OFFSET` clause or the `WHERE` clause (time filter) to batch data to prevent excessive data volume in a single operation. -```shell -IOTDB > SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +```sql +SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +``` +``` +--------------------------+----------------------+--------+ | source column| target timeseries| written| +--------------------------+----------------------+--------+ diff --git a/src/UserGuide/latest/Basic-Concept/Query-Data_timecho.md b/src/UserGuide/latest/Basic-Concept/Query-Data_timecho.md index c9a38b5fd..a1a6a368c 100644 --- a/src/UserGuide/latest/Basic-Concept/Query-Data_timecho.md +++ b/src/UserGuide/latest/Basic-Concept/Query-Data_timecho.md @@ -114,7 +114,7 @@ SELECT [LAST] selectExpr [, selectExpr] ... 
The SQL statement is: ```sql -select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 +select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000; ``` which means: @@ -374,8 +374,10 @@ which means: Query and return the last data points of timeseries prefixPath.path **Example 1:** get the last point of root.ln.wf01.wt01.status: +```sql +select last status from root.ln.wf01.wt01; +``` ``` -IoTDB> select last status from root.ln.wf01.wt01 +-----------------------------+------------------------+-----+--------+ | Time| timeseries|value|dataType| +-----------------------------+------------------------+-----+--------+ @@ -387,8 +389,10 @@ It costs 0.000s **Example 2:** get the last status and temperature points of root.ln.wf01.wt01, whose timestamp larger or equal to 2017-11-07T23:50:00。 +```sql +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; +``` ``` -IoTDB> select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -401,8 +405,10 @@ It costs 0.002s **Example 3:** get the last points of all sensor in root.ln.wf01.wt01, and order the result by the timeseries column in descending order +```sql +select last * from root.ln.wf01.wt01 order by timeseries desc; +``` ``` -IoTDB> select last * from root.ln.wf01.wt01 order by timeseries desc; +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -415,8 +421,10 @@ It costs 0.002s **Example 4:** get the last points of all sensor in root.ln.wf01.wt01, and order the result by the dataType column in descending order +```sql +select last * from root.ln.wf01.wt01 order by dataType 
desc; +``` ``` -IoTDB> select last * from root.ln.wf01.wt01 order by dataType desc; +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -428,9 +436,10 @@ It costs 0.002s ``` **Note:** The requirement to query the latest data point with other filtering conditions can be implemented through function composition. For example: - +```sql +select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device; +``` ``` -IoTDB> select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device +-----------------+---------------------+----------------+-----------------------+------------------+ | Device|max_time(temperature)|max_time(status)|last_value(temperature)|last_value(status)| +-----------------+---------------------+----------------+-----------------------+------------------+ @@ -547,8 +556,10 @@ In the value filter condition, for TEXT type data, use `Like` and `Regexp` opera **Example 1:** Query data containing `'cc'` in `value` under `root.sg.d1`. +```sql +select * from root.sg.d1 where value like '%cc%' +``` ``` -IoTDB> select * from root.sg.d1 where value like '%cc%' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -561,8 +572,10 @@ It costs 0.002s **Example 2:** Query data that consists of 3 characters and the second character is `'b'` in `value` under `root.sg.d1`. 
+```sql +select * from root.sg.device where value like '_b_'; +``` ``` -IoTDB> select * from root.sg.device where value like '_b_' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -587,8 +600,10 @@ Beginning with a: ^a.* **Example 1:** Query a string composed of 26 English characters for the value under root.sg.d1 +```sql +select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +``` ``` -IoTDB> select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -601,8 +616,10 @@ It costs 0.002s **Example 2:** Query root.sg.d1 where the value value is a string composed of 26 lowercase English characters and the time is greater than 100 +```sql +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +``` ``` -IoTDB> select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -1607,16 +1624,16 @@ you can use the `HAVING` clause after the `GROUP BY` clause. > The following usages are incorrect: > > ```sql -> select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 -> select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 +> select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; +> select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; > ``` > > 2.When filtering the `GROUP BY LEVEL` result, the PATH in `SELECT` and `HAVING` can only have one node. 
> The following usages are incorrect: > > ```sql -> select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 -> select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 +> select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; +> select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; > ``` Here are a few examples of using the 'HAVING' clause to filter aggregate results. @@ -1638,7 +1655,7 @@ Aggregation result 1: Aggregation result filtering query 1: ```sql - select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 1 + select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 1; ``` Filtering result 1: @@ -1675,7 +1692,7 @@ Aggregation result 2: Aggregation result filtering query 2: ```sql - select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device + select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device; ``` Filtering result 2: @@ -1706,7 +1723,7 @@ In IoTDB, users can use the FILL clause to specify the fill mode when data is mi **The following is the syntax definition of the `FILL` clause:** ```sql -FILL '(' PREVIOUS | LINEAR | constant ')' +FILL '(' PREVIOUS | LINEAR | constant ')'; ``` **Note:** @@ -2286,7 +2303,7 @@ It costs 0.005s If the parameter N/SN of LIMIT/SLIMIT clause exceeds the allowable maximum value (N/SN is of type int64), the system prompts errors. 
For example, executing the following SQL statement: ```sql -select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 limit 9223372036854775808 +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 limit 9223372036854775808; ``` The SQL statement will not be executed and the corresponding error prompt is given as follows: @@ -2298,7 +2315,7 @@ Msg: 416: Out of range. LIMIT : N should be Int64. If the parameter N/SN of LIMIT/SLIMIT clause is not a positive intege, the system prompts errors. For example, executing the following SQL statement: ```sql -select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 limit 13.1 +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 limit 13.1; ``` The SQL statement will not be executed and the corresponding error prompt is given as follows: @@ -2310,7 +2327,7 @@ Msg: 401: line 1:129 mismatched input '.' expecting {, ';'} If the parameter OFFSET of LIMIT clause exceeds the size of the result set, IoTDB will return an empty result set. For example, executing the following SQL statement: ```sql -select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 limit 2 offset 6 +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 limit 2 offset 6; ``` The result is shown below: @@ -2327,7 +2344,7 @@ It costs 0.005s If the parameter SOFFSET of SLIMIT clause is not smaller than the number of available timeseries, the system prompts errors. 
For example, executing the following SQL statement: ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 2 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 2; ``` The SQL statement will not be executed and the corresponding error prompt is given as follows: @@ -2443,7 +2460,7 @@ The result below indicates `ORDER BY DEVICE ASC,TIME ASC` is the clause in defau Besides,`ALIGN BY DEVICE` and `ORDER BY` clauses can be used with aggregate query,the SQL statement is: ```sql -select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` The result shows below: @@ -2491,7 +2508,7 @@ Here are several examples of queries for sorting arbitrary expressions using the When you need to sort the results based on the base score score, you can use the following SQL: ```Sql -select score from root.** order by score desc align by device +select score from root.** order by score desc align by device; ``` This will give you the following results: @@ -2543,7 +2560,7 @@ If you want to sort the results based on the total score and, in case of tied sc select base, score, bonus, total from root.** order by total desc NULLS Last, score desc NULLS Last, bonus desc NULLS Last, - time desc align by device + time desc align by device; ``` Here are the results: @@ -2571,7 +2588,7 @@ Here are the results: In the `ORDER BY` clause, you can also use aggregate query expressions. 
For example: ```Sql -select min_value(total) from root.** order by min_value(total) asc align by device +select min_value(total) from root.** order by min_value(total) asc align by device; ``` This will give you the following results: @@ -2591,7 +2608,7 @@ This will give you the following results: When specifying multiple columns in the query, the unsorted columns will change order along with the rows and sorted columns. The order of rows when the sorting columns are the same may vary depending on the specific implementation (no fixed order). For example: ```Sql -select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device +select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device; ``` This will give you the following results: @@ -2612,7 +2629,7 @@ This will give you the following results: You can use both `ORDER BY DEVICE,TIME` and `ORDER BY EXPRESSION` together. For example: ```Sql -select score from root.** order by device asc, score desc, time asc align by device +select score from root.** order by device asc, score desc, time asc align by device; ``` This will give you the following results: @@ -2727,8 +2744,10 @@ For examples: - **Example 1** (aligned by time) -```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +```sql +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +``` +``` +--------------+-------------------+--------+ | source column| target timeseries| written| +--------------+-------------------+--------+ @@ -2757,8 +2776,10 @@ We can see that the writing of the `INTO` clause is very flexible as long as the - **Example 2** (aligned by time) -```shell -IoTDB> select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +```sql +select count(s1 + s2), 
last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +``` +``` +--------------------------------------+-------------------------+--------+ | source column| target timeseries| written| +--------------------------------------+-------------------------+--------+ @@ -2774,8 +2795,10 @@ This statement stores the results of an aggregated query into the specified time - **Example 3** (aligned by device) -```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +```sql +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +``` +``` +--------------+--------------+-------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+-------------------+--------+ @@ -2797,8 +2820,10 @@ This statement also writes the query results of the four time series under the ` - **Example 4** (aligned by device) -```shell -IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +```sql +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +``` +``` +--------------+--------------+------------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+------------------------+--------+ @@ -2958,8 +2983,10 @@ This statement specifies that `root.sg_copy.d1` is an unaligned device and `root ETL the original data and write a new time series. 
-```shell -IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEIVCE; +```sql +SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEVICE; +``` +``` +--------------+-------------------+---------------------------+--------+ | source device| source column| target timeseries| written| +--------------+-------------------+---------------------------+--------+ @@ -2977,8 +3004,10 @@ IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) Persistently store the query results, which acts like a materialized view. -```shell -IOTDB > SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +```sql +SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +``` +``` +--------------------------+-----------------------------+--------+ | source column| target timeseries| written| +--------------------------+-----------------------------+--------+ @@ -2996,8 +3025,10 @@ Rewrite non-aligned time series into another aligned time series. **Note:** It is recommended to use the `LIMIT & OFFSET` clause or the `WHERE` clause (time filter) to batch data to prevent excessive data volume in a single operation.
-```shell -IOTDB > SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +```sql +SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +``` +``` +--------------------------+----------------------+--------+ | source column| target timeseries| written| +--------------------------+----------------------+--------+ diff --git a/src/UserGuide/latest/SQL-Manual/SQL-Manual_apache.md b/src/UserGuide/latest/SQL-Manual/SQL-Manual_apache.md index 2b4603537..7806b1a60 100644 --- a/src/UserGuide/latest/SQL-Manual/SQL-Manual_apache.md +++ b/src/UserGuide/latest/SQL-Manual/SQL-Manual_apache.md @@ -28,33 +28,33 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad ### 1.1 Create Database ```sql -IoTDB > create database root.ln -IoTDB > create database root.sgcc +create database root.ln; +create database root.sgcc; ``` ### 1.2 Show Databases ```sql -IoTDB> SHOW DATABASES -IoTDB> SHOW DATABASES root.** +SHOW DATABASES; +SHOW DATABASES root.**; ``` ### 1.3 Delete Database ```sql -IoTDB > DELETE DATABASE root.ln -IoTDB > DELETE DATABASE root.sgcc -// delete all data, all timeseries and all databases -IoTDB > DELETE DATABASE root.** +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +// delete all data, all timeseries and all databases; +DELETE DATABASE root.**; ``` ### 1.4 Count Databases ```sql -IoTDB> count databases -IoTDB> count databases root.* -IoTDB> count databases root.sgcc.* -IoTDB> count databases root.sgcc +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` ### 1.5 Setting up heterogeneous databases (Advanced operations) @@ -74,7 +74,7 @@ ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; #### Show heterogeneous databases ```sql -SHOW DATABASES DETAILS +SHOW DATABASES DETAILS; ``` ### 1.6 TTL @@ -82,25 +82,25 @@ SHOW DATABASES DETAILS 
#### Set TTL ```sql -IoTDB> set ttl to root.ln 3600000 -IoTDB> set ttl to root.sgcc.** 3600000 -IoTDB> set ttl to root.** 3600000 +set ttl to root.ln 3600000; +set ttl to root.sgcc.** 3600000; +set ttl to root.** 3600000; ``` #### Unset TTL ```sql -IoTDB> unset ttl from root.ln -IoTDB> unset ttl from root.sgcc.** -IoTDB> unset ttl from root.** +unset ttl from root.ln; +unset ttl from root.sgcc.**; +unset ttl from root.**; ``` #### Show TTL ```sql -IoTDB> SHOW ALL TTL -IoTDB> SHOW TTL ON StorageGroupNames -IoTDB> SHOW DEVICES +SHOW ALL TTL; +SHOW TTL ON StorageGroupNames; +SHOW DEVICES; ``` ## 2. DEVICE TEMPLATE @@ -120,13 +120,13 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad **Example 1:** Create a template containing two non-aligned timeseires ```sql -IoTDB> create device template t1 (temperature FLOAT, status BOOLEAN) +create device template t1 (temperature FLOAT, status BOOLEAN); ``` **Example 2:** Create a template containing a group of aligned timeseires ```sql -IoTDB> create device template t2 aligned (lat FLOAT, lon FLOAT) +create device template t2 aligned (lat FLOAT, lon FLOAT); ``` The` lat` and `lon` measurements are aligned. @@ -134,52 +134,52 @@ The` lat` and `lon` measurements are aligned. 
### 2.2 Set Device Template ```sql -IoTDB> set device template t1 to root.sg1.d1 +set device template t1 to root.sg1.d1; ``` ### 2.3 Activate Device Template ```sql -IoTDB> set device template t1 to root.sg1.d1 -IoTDB> set device template t2 to root.sg1.d2 -IoTDB> create timeseries using device template on root.sg1.d1 -IoTDB> create timeseries using device template on root.sg1.d2 +set device template t1 to root.sg1.d1; +set device template t2 to root.sg1.d2; +create timeseries using device template on root.sg1.d1; +create timeseries using device template on root.sg1.d2; ``` ### 2.4 Show Device Template ```sql -IoTDB> show device templates -IoTDB> show nodes in device template t1 -IoTDB> show paths set device template t1 -IoTDB> show paths using device template t1 +show device templates; +show nodes in device template t1; +show paths set device template t1; +show paths using device template t1; ``` ### 2.5 Deactivate Device Template ```sql -IoTDB> delete timeseries of device template t1 from root.sg1.d1 -IoTDB> deactivate device template t1 from root.sg1.d1 -IoTDB> delete timeseries of device template t1 from root.sg1.*, root.sg2.* -IoTDB> deactivate device template t1 from root.sg1.*, root.sg2.* +delete timeseries of device template t1 from root.sg1.d1; +deactivate device template t1 from root.sg1.d1; +delete timeseries of device template t1 from root.sg1.*, root.sg2.*; +deactivate device template t1 from root.sg1.*, root.sg2.*; ``` ### 2.6 Unset Device Template ```sql -IoTDB> unset device template t1 from root.sg1.d1 +unset device template t1 from root.sg1.d1; ``` ### 2.7 Drop Device Template ```sql -IoTDB> drop device template t1 +drop device template t1; ``` ### 2.8 Alter Device Template ```sql -IoTDB> alter device template t1 add (speed FLOAT) +alter device template t1 add (speed FLOAT); ``` ## 3. 
TIMESERIES MANAGEMENT @@ -189,108 +189,108 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad ### 3.1 Create Timeseries ```sql -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` - From v0.13, you can use a simplified version of the SQL statements to create timeseries: ```sql -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` - 
 Notice that when in the CREATE TIMESERIES statement the encoding method conflicts with the data type, the system gives the corresponding error prompt as shown below: ```sql -IoTDB > create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN -error: encoding TS_2DIFF does not support BOOLEAN +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN; +error: encoding TS_2DIFF does not support BOOLEAN ``` ### 3.2 Create Aligned Timeseries ```sql -IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT , longitude FLOAT) +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT , longitude FLOAT); ``` ### 3.3 Delete Timeseries ```sql -IoTDB> delete timeseries root.ln.wf01.wt01.status -IoTDB> delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -IoTDB> delete timeseries root.ln.wf02.* -IoTDB> drop timeseries root.ln.wf02.* +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` ### 3.4 Show Timeseries ```sql -IoTDB> show timeseries root.** -IoTDB> show timeseries root.ln.** -IoTDB> show timeseries root.ln.** limit 10 offset 10 -IoTDB> show timeseries root.ln.** where timeseries contains 'wf01.wt' -IoTDB> show timeseries root.ln.** where dataType=FLOAT +show timeseries root.**; +show timeseries root.ln.**; +show timeseries root.ln.** limit 10 offset 10; +show timeseries root.ln.** where timeseries contains 'wf01.wt'; +show timeseries root.ln.** where dataType=FLOAT; ``` ### 3.5 Count Timeseries ```sql -IoTDB > COUNT TIMESERIES root.** -IoTDB > COUNT TIMESERIES root.ln.** -IoTDB > COUNT TIMESERIES root.ln.*.*.status -IoTDB > COUNT TIMESERIES root.ln.wf01.wt01.status -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -IoTDB > COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -IoTDB > COUNT TIMESERIES root.** 
WHERE TAGS(unit) = 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 -IoTDB > COUNT TIMESERIES root.** GROUP BY LEVEL=1 -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc'; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c'; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c'; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` ### 3.6 Tag and Attribute Management ```sql -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` * Rename the tag/attribute key ```SQL -ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1; ``` * Reset the tag/attribute value ```SQL -ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1; ``` * Delete the existing tag/attribute ```SQL -ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2; ``` * Add new tags ```SQL -ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4; ``` * Add new attributes ```SQL -ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4; ``` * Upsert alias, tags 
and attributes @@ -298,49 +298,51 @@ ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 > add alias or a new key-value if the alias or key doesn't exist, otherwise, update the old one with new value. ```SQL -ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4) +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4); ``` * Show timeseries using tags. Use TAGS(tagKey) to identify the tags used as filter key ```SQL -SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause +SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; ``` returns all the timeseries information that satisfy the where condition and match the pathPattern. SQL statements are as follows: ```SQL -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` - count timeseries using tags ```SQL -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL=; ``` returns all the number of timeseries that satisfy the where condition and match the pathPattern. 
SQL statements are as follows: ```SQL -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` create aligned timeseries ```SQL -create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) +create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)); ``` The execution result is as follows: ```SQL -IoTDB> show timeseries +show timeseries; +``` +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -352,7 +354,9 @@ IoTDB> show timeseries Support query: ```SQL -IoTDB> show timeseries where TAGS(tag1)='v1' +show timeseries where TAGS(tag1)='v1'; +``` +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -369,40 +373,40 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad ### 4.1 Show Child Paths ```SQL -SHOW CHILD PATHS pathPattern +SHOW CHILD PATHS pathPattern; ``` ### 4.2 Show Child Nodes ```SQL -SHOW 
CHILD NODES pathPattern +SHOW CHILD NODES pathPattern; ``` ### 4.3 Count Nodes ```SQL -IoTDB > COUNT NODES root.** LEVEL=2 -IoTDB > COUNT NODES root.ln.** LEVEL=2 -IoTDB > COUNT NODES root.ln.wf01.** LEVEL=3 -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.** LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` ### 4.4 Show Devices ```SQL -IoTDB> show devices -IoTDB> show devices root.ln.** -IoTDB> show devices root.ln.** where device contains 't' -IoTDB> show devices with database -IoTDB> show devices root.ln.** with database +show devices; +show devices root.ln.**; +show devices root.ln.** where device contains 't'; +show devices with database; +show devices root.ln.** with database; ``` ### 4.5 Count Devices ```SQL -IoTDB> show devices -IoTDB> count devices -IoTDB> count devices root.ln.** +show devices; +count devices; +count devices root.ln.**; ``` ## 5. INSERT & LOAD DATA @@ -416,30 +420,30 @@ For more details, see document [Write-Data](../Basic-Concept/Write-Data_apache). 
- Insert Single Timeseries ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp,status) values(1,true) -IoTDB > insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1') +insert into root.ln.wf02.wt02(timestamp,status) values(1,true); +insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1'); ``` - Insert Multiple Timeseries ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (2, false, 'v2') -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4') +insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (2, false, 'v2'); +insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4'); ``` - Use the Current System Timestamp as the Timestamp of the Data Point ```SQL -IoTDB > insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2') +insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2'); ``` #### Insert Data Into Aligned Timeseries ```SQL -IoTDB > create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE) -IoTDB > insert into root.sg1.d1(time, s1, s2) aligned values(1, 1, 1) -IoTDB > insert into root.sg1.d1(time, s1, s2) aligned values(2, 2, 2), (3, 3, 3) -IoTDB > select * from root.sg1.d1 +create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE); +insert into root.sg1.d1(time, s1, s2) aligned values(1, 1, 1); +insert into root.sg1.d1(time, s1, s2) aligned values(2, 2, 2), (3, 3, 3); +select * from root.sg1.d1; ``` ### 5.2 Load External TsFile Tool @@ -476,34 +480,34 @@ For more details, see document [Write-Delete-Data](../Basic-Concept/Write-Data_a ### 6.1 Delete Single Timeseries ```sql -IoTDB > delete from root.ln.wf02.wt02.status where time<=2017-11-01T16:26:00; -IoTDB > delete from root.ln.wf02.wt02.status where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -IoTDB > delete from root.ln.wf02.wt02.status where time < 10 -IoTDB > delete from root.ln.wf02.wt02.status where time <= 10 
-IoTDB > delete from root.ln.wf02.wt02.status where time < 20 and time > 10 -IoTDB > delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10 -IoTDB > delete from root.ln.wf02.wt02.status where time > 20 -IoTDB > delete from root.ln.wf02.wt02.status where time >= 20 -IoTDB > delete from root.ln.wf02.wt02.status where time = 20 -IoTDB > delete from root.ln.wf02.wt02.status where time > 4 or time < 0 -Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic -expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' -IoTDB > delete from root.ln.wf02.wt02.status +delete from root.ln.wf02.wt02.status where time<=2017-11-01T16:26:00; +delete from root.ln.wf02.wt02.status where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; +delete from root.ln.wf02.wt02.status where time < 10; +delete from root.ln.wf02.wt02.status where time <= 10; +delete from root.ln.wf02.wt02.status where time < 20 and time > 10; +delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10; +delete from root.ln.wf02.wt02.status where time > 20; +delete from root.ln.wf02.wt02.status where time >= 20; +delete from root.ln.wf02.wt02.status where time = 20; +delete from root.ln.wf02.wt02.status where time > 4 or time < 0; +Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic +expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' +delete from root.ln.wf02.wt02.status; ``` ### 6.2 Delete Multiple Timeseries ```sql -IoTDB > delete from root.ln.wf02.wt02 where time <= 2017-11-01T16:26:00; -IoTDB > delete from root.ln.wf02.wt02.* where time <= 2017-11-01T16:26:00; -IoTDB> delete from root.ln.wf03.wt02.status where time < now() +delete from root.ln.wf02.wt02 where time <= 2017-11-01T16:26:00; +delete from root.ln.wf02.wt02.* where time <= 2017-11-01T16:26:00; +delete from root.ln.wf03.wt02.status where time < now(); Msg: The statement is 
executed successfully. ``` ### 6.3 Delete Time Partition (experimental) ```sql -IoTDB > DELETE PARTITION root.ln 0,1,2 +DELETE PARTITION root.ln 0,1,2; ``` ## 7. QUERY DATA @@ -537,31 +541,31 @@ SELECT [LAST] selectExpr [, selectExpr] ... #### Select a Column of Data Based on a Time Interval ```sql -IoTDB > select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 +select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000; ``` #### Select Multiple Columns of Data Based on a Time Interval ```sql -IoTDB > select status, temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; +select status, temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; ``` #### Select Multiple Columns of Data for the Same Device According to Multiple Time Intervals ```sql -IoTDB > select status,temperature from root.ln.wf01.wt01 where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); +select status,temperature from root.ln.wf01.wt01 where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); ``` #### Choose Multiple Columns of Data for Different Devices According to Multiple Time Intervals ```sql -IoTDB > select wf01.wt01.status,wf02.wt02.hardware from root.ln where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); +select wf01.wt01.status,wf02.wt02.hardware from root.ln where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); ``` #### Order By Time Query ```sql -IoTDB > select * from root.ln.** where time > 1 order by time desc limit 10; +select * from root.ln.** where time > 1 order by 
time desc limit 10; ``` ### 7.2 `SELECT` CLAUSE @@ -569,7 +573,7 @@ IoTDB > select * from root.ln.** where time > 1 order by time desc limit 10; #### Use Alias ```sql -IoTDB > select s1 as temperature, s2 as speed from root.ln.wf01.wt01; +select s1 as temperature, s2 as speed from root.ln.wf01.wt01; ``` #### Nested Expressions @@ -577,35 +581,35 @@ IoTDB > select s1 as temperature, s2 as speed from root.ln.wf01.wt01; ##### Nested Expressions with Time Series Query ```sql -IoTDB > select a, +select a, b, ((a + 1) * 2 - 1) % 2 + 1.5, sin(a + sin(a + sin(b))), -(a + b) * (sin(a + b) * sin(a + b) + cos(a + b) * cos(a + b)) + 1 from root.sg1; -IoTDB > select (a + b) * 2 + sin(a) from root.sg +select (a + b) * 2 + sin(a) from root.sg; -IoTDB > select (a + *) / 2 from root.sg1 +select (a + *) / 2 from root.sg1; -IoTDB > select (a + b) * 3 from root.sg, root.ln +select (a + b) * 3 from root.sg, root.ln; ``` ##### Nested Expressions query with aggregations ```sql -IoTDB > select avg(temperature), +select avg(temperature), sin(avg(temperature)), avg(temperature) + 1, -sum(hardware), avg(temperature) + sum(hardware) from root.ln.wf01.wt01; -IoTDB > select avg(*), +select avg(*), (avg(*) + 1) * 3 / 2 -1 -from root.sg1 +from root.sg1; -IoTDB > select avg(temperature), +select avg(temperature), sin(avg(temperature)), avg(temperature) + 1, -sum(hardware), @@ -617,10 +621,10 @@ GROUP BY([10, 90), 10ms); #### Last Query ```sql -IoTDB > select last status from root.ln.wf01.wt01 -IoTDB > select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 -IoTDB > select last * from root.ln.wf01.wt01 order by timeseries desc; -IoTDB > select last * from root.ln.wf01.wt01 order by dataType desc; +select last status from root.ln.wf01.wt01; +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; +select last * from root.ln.wf01.wt01 order by timeseries desc; +select last * from root.ln.wf01.wt01 order by dataType desc; ``` ### 7.3 
`WHERE` CLAUSE @@ -628,22 +632,22 @@ IoTDB > select last * from root.ln.wf01.wt01 order by dataType desc; #### Time Filter ```sql -IoTDB > select s1 from root.sg1.d1 where time > 2022-01-01T00:05:00.000; -IoTDB > select s1 from root.sg1.d1 where time = 2022-01-01T00:05:00.000; -IoTDB > select s1 from root.sg1.d1 where time >= 2022-01-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; +select s1 from root.sg1.d1 where time > 2022-01-01T00:05:00.000; +select s1 from root.sg1.d1 where time = 2022-01-01T00:05:00.000; +select s1 from root.sg1.d1 where time >= 2022-01-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; ``` #### Value Filter ```sql -IoTDB > select temperature from root.sg1.d1 where temperature > 36.5; -IoTDB > select status from root.sg1.d1 where status = true; -IoTDB > select temperature from root.sg1.d1 where temperature between 36.5 and 40; -IoTDB > select temperature from root.sg1.d1 where temperature not between 36.5 and 40; -IoTDB > select code from root.sg1.d1 where code in ('200', '300', '400', '500'); -IoTDB > select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); -IoTDB > select code from root.sg1.d1 where temperature is null; -IoTDB > select code from root.sg1.d1 where temperature is not null; +select temperature from root.sg1.d1 where temperature > 36.5; +select status from root.sg1.d1 where status = true; +select temperature from root.sg1.d1 where temperature between 36.5 and 40; +select temperature from root.sg1.d1 where temperature not between 36.5 and 40; +select code from root.sg1.d1 where code in ('200', '300', '400', '500'); +select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); +select code from root.sg1.d1 where temperature is null; +select code from root.sg1.d1 where temperature is not null; ``` #### Fuzzy Query @@ -651,15 +655,15 @@ IoTDB > select code from root.sg1.d1 where temperature is not null; - Fuzzy matching using `Like` ```sql -IoTDB > select * from root.sg.d1 where value like 
'%cc%' -IoTDB > select * from root.sg.device where value like '_b_' +select * from root.sg.d1 where value like '%cc%'; +select * from root.sg.device where value like '_b_'; ``` - Fuzzy matching using `Regexp` ```sql -IoTDB > select * from root.sg.d1 where value regexp '^[A-Za-z]+$' -IoTDB > select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; ``` ### 7.4 `GROUP BY` CLAUSE @@ -667,91 +671,91 @@ IoTDB > select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 - Aggregate By Time without Specifying the Sliding Step Length ```sql -IoTDB > select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d); +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d); ``` - Aggregate By Time Specifying the Sliding Step Length ```sql -IoTDB > select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d); +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d); ``` - Aggregate by Natural Month ```sql -IoTDB > select count(status) from root.ln.wf01.wt01 group by([2017-11-01T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); -IoTDB > select count(status) from root.ln.wf01.wt01 group by([2017-10-31T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); +select count(status) from root.ln.wf01.wt01 group by([2017-11-01T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); +select count(status) from root.ln.wf01.wt01 group by([2017-10-31T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); ``` - Left Open And Right Close Range ```sql -IoTDB > select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d); +select count(status) from root.ln.wf01.wt01 group by 
((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d); ``` - Aggregation By Variation ```sql -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6) -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false) -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4) -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6); +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false); +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4); +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10); ``` - Aggregation By Condition ```sql -IoTDB > select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=true) -IoTDB > select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=false) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=true); +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=false); ``` - Aggregation By Session ```sql -IoTDB > select __endTime,count(*) from root.** group by session(1d) -IoTDB > select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device +select __endTime,count(*) from root.** group by session(1d); +select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device; ``` - Aggregation By Count ```sql -IoTDB > select count(charging_stauts), 
first_value(soc) from root.sg group by count(charging_status,5) -IoTDB > select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false) +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5); +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false); ``` - Aggregation By Level ```sql -IoTDB > select count(status) from root.** group by level = 1 -IoTDB > select count(status) from root.** group by level = 3 -IoTDB > select count(status) from root.** group by level = 1, 3 -IoTDB > select max_value(temperature) from root.** group by level = 0 -IoTDB > select count(*) from root.ln.** group by level = 2 +select count(status) from root.** group by level = 1; +select count(status) from root.** group by level = 3; +select count(status) from root.** group by level = 1, 3; +select max_value(temperature) from root.** group by level = 0; +select count(*) from root.ln.** group by level = 2; ``` - Aggregate By Time with Level Clause ```sql -IoTDB > select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d), level=1; -IoTDB > select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d), level=1; +select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d), level=1; +select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d), level=1; ``` - Aggregation query by one single tag ```sql -IoTDB > SELECT AVG(temperature) FROM root.factory1.** GROUP BY TAGS(city); +SELECT AVG(temperature) FROM root.factory1.** GROUP BY TAGS(city); ``` - Aggregation query by multiple tags ```sql -IoTDB > SELECT avg(temperature) FROM root.factory1.** GROUP BY TAGS(city, workshop); +SELECT avg(temperature) FROM root.factory1.** GROUP BY TAGS(city, workshop); ``` - Downsampling Aggregation by 
tags based on Time Window ```sql -IoTDB > SELECT avg(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS(city, workshop); +SELECT avg(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS(city, workshop); ``` ### 7.5 `HAVING` CLAUSE @@ -759,17 +763,17 @@ IoTDB > SELECT avg(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5 Correct: ```sql -IoTDB > select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 1 -IoTDB > select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device +select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 1; +select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device; ``` Incorrect: ```sql -IoTDB > select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 -IoTDB > select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 -IoTDB > select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 -IoTDB > select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 +select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; +select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; +select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; +select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; ``` ### 7.6 `FILL` CLAUSE @@ -777,7 +781,7 @@ IoTDB > select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having su #### `PREVIOUS` Fill ```sql -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(previous); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(previous); ``` #### `PREVIOUS` FILL and specify the fill timeout threshold @@ -788,14 +792,14 @@ select temperature, status 
from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: #### `LINEAR` Fill ```sql -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(linear); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(linear); ``` #### Constant Fill ```sql -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(2.0); -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(true); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(2.0); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(true); ``` ### 7.7 `LIMIT` and `SLIMIT` CLAUSES (PAGINATION) @@ -803,24 +807,24 @@ IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-1 #### Row Control over Query Results ```sql -IoTDB > select status, temperature from root.ln.wf01.wt01 limit 10 -IoTDB > select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 -IoTDB > select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 2 offset 3 -IoTDB > select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 5 offset 3 +select status, temperature from root.ln.wf01.wt01 limit 10; +select status, temperature from root.ln.wf01.wt01 limit 5 offset 3; +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 2 offset 3; +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 
2017-11-07T23:00:00),1d) limit 5 offset 3; ``` #### Column Control over Query Results ```sql -IoTDB > select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 -IoTDB > select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1 -IoTDB > select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1; +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1; +select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1; ``` #### Row and Column Control over Query Results ```sql -IoTDB > select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 +select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0; ``` ### 7.8 `ORDER BY` CLAUSE @@ -828,31 +832,31 @@ IoTDB > select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 #### Order by in ALIGN BY TIME mode ```sql -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time desc; +select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time desc; ``` #### Order by in ALIGN BY DEVICE mode ```sql -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc,time asc align by device; -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,device desc align by device; -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; -IoTDB > select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device 
desc,time asc align by device; +select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,device desc align by device; +select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; +select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` #### Order by arbitrary expressions ```sql -IoTDB > select score from root.** order by score desc align by device -IoTDB > select score,total from root.one order by base+score+bonus desc -IoTDB > select score,total from root.one order by total desc -IoTDB > select base, score, bonus, total from root.** order by total desc NULLS Last, +select score from root.** order by score desc align by device; +select score,total from root.one order by base+score+bonus desc; +select score,total from root.one order by total desc; +select base, score, bonus, total from root.** order by total desc NULLS Last, score desc NULLS Last, bonus desc NULLS Last, - time desc align by device -IoTDB > select min_value(total) from root.** order by min_value(total) asc align by device -IoTDB > select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device -IoTDB > select score from root.** order by device asc, score desc, time asc align by device + time desc align by device; +select min_value(total) from root.** order by min_value(total) asc align by device; +select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device; +select score from root.** order by device asc, score desc, time asc align by device; ``` ### 7.9 `ALIGN BY` CLAUSE @@ -860,54 +864,54 @@ IoTDB > select score from root.** order by device asc, score desc, time asc alig #### Align by Device ```sql -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; +select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` ### 7.10 `INTO` CLAUSE 
(QUERY WRITE-BACK) ```sql -IoTDB > select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; -IoTDB > select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); -IoTDB > select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; -IoTDB > select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; ``` - Using variable placeholders: ```sql -IoTDB > select s1, s2 +select s1, s2 into root.sg_copy.d1(::), root.sg_copy.d2(s1), root.sg_copy.d1(${3}), root.sg_copy.d2(::) from root.sg.d1, root.sg.d2; -IoTDB > select d1.s1, d1.s2, d2.s3, d3.s4 +select d1.s1, d1.s2, d2.s3, d3.s4 into ::(s1_1, s2_2), root.sg.d2_2(s3_3), root.${2}_copy.::(s4) from root.sg; -IoTDB > select * into root.sg_bk.::(::) from root.sg.**; +select * into root.sg_bk.::(::) from root.sg.**; -IoTDB > select s1, s2, s3, s4 +select s1, s2, s3, s4 into root.backup_sg.d1(s1, s2, s3, s4), root.backup_sg.d2(::), root.sg.d3(backup_${4}) from root.sg.d1, root.sg.d2, root.sg.d3 align by device; -IoTDB > select avg(s1), sum(s2) + sum(s3), count(s4) +select avg(s1), sum(s2) + sum(s3), count(s4) into root.agg_${2}.::(avg_s1, sum_s2_add_s3, count_s4) from root.** align by device; -IoTDB > select * into ::(backup_${4}) from root.sg.** align by device; +select * into ::(backup_${4}) from root.sg.** align by 
device; -IoTDB > select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; ``` ## 8. Maintennance Generate the corresponding query plan: ```sql -explain select s1,s2 from root.sg.d1 +explain select s1,s2 from root.sg.d1; ``` Execute the corresponding SQL, analyze the execution and output: ```sql -explain analyze select s1,s2 from root.sg.d1 order by s1 +explain analyze select s1,s2 from root.sg.d1 order by s1; ``` ## 9. OPERATOR @@ -918,7 +922,7 @@ For more details, see document [Operator-and-Expression](./Operator-and-Expressi For details and examples, see the document [Arithmetic Operators and Functions](./Operator-and-Expression.md#_1-1-arithmetic-operators). ```sql -select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1 +select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1; ``` ### 9.2 Comparison Operators @@ -934,12 +938,12 @@ select temperature from root.sg1.d1 where temperature between 36.5 and 40; select temperature from root.sg1.d1 where temperature not between 36.5 and 40; # Fuzzy matching operator: Use `Like` for fuzzy matching -select * from root.sg.d1 where value like '%cc%' -select * from root.sg.device where value like '_b_' +select * from root.sg.d1 where value like '%cc%'; +select * from root.sg.device where value like '_b_'; # Fuzzy matching operator: Use `Regexp` for fuzzy matching -select * from root.sg.d1 where value regexp '^[A-Za-z]+$' -select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; select b, b like '1%', b regexp '[0-2]' from root.test; # `IS NULL` operator @@ -1002,25 +1006,25 @@ For details and examples, see the document [String 
Processing](./Operator-and-Ex ```sql select s1, string_contains(s1, 's'='warn') from root.sg1.d4; select s1, string_matches(s1, 'regex'='[^\\s]+37229') from root.sg1.d4; -select s1, length(s1) from root.sg1.d1 -select s1, locate(s1, "target"="1") from root.sg1.d1 -select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1 -select s1, startswith(s1, "target"="1") from root.sg1.d1 -select s1, endswith(s1, "target"="1") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", "series_behind"="true") from root.sg1.d1 -select s1, substring(s1 from 1 for 2) from root.sg1.d1 -select s1, replace(s1, 'es', 'tt') from root.sg1.d1 -select s1, upper(s1) from root.sg1.d1 -select s1, lower(s1) from root.sg1.d1 -select s3, trim(s3) from root.sg1.d1 -select s1, s2, strcmp(s1, s2) from root.sg1.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1 -select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1 -select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 +select s1, length(s1) from root.sg1.d1; +select s1, locate(s1, "target"="1") from root.sg1.d1; +select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1; +select s1, startswith(s1, "target"="1") from root.sg1.d1; +select s1, endswith(s1, "target"="1") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", "series_behind"="true") from root.sg1.d1; +select s1, substring(s1 from 1 for 2) from root.sg1.d1; +select s1, replace(s1, 'es', 
'tt') from root.sg1.d1; +select s1, upper(s1) from root.sg1.d1; +select s1, lower(s1) from root.sg1.d1; +select s3, trim(s3) from root.sg1.d1; +select s1, s2, strcmp(s1, s2) from root.sg1.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1; +select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1; +select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1; +select regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1; +select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1; ``` ### 10.5 Data Type Conversion Function @@ -1028,7 +1032,7 @@ select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 For details and examples, see the document [Data Type Conversion Function](./Operator-and-Expression.md#_2-5-data-type-conversion-function). ```sql -SELECT cast(s1 as INT32) from root.sg +SELECT cast(s1 as INT32) from root.sg; ``` ### 10.6 Constant Timeseries Generating Functions @@ -1076,8 +1080,8 @@ select equal_size_bucket_agg_sample(temperature, 'type'='avg','proportion'='0.1' select equal_size_bucket_m4_sample(temperature, 'proportion'='0.1') as M4_sample from root.ln.wf01.wt01; select equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='avg', 'number'='2') as outlier_avg_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='stendis', 'number'='2') as outlier_stendis_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='cos', 'number'='2') as outlier_cos_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='prenextdis', 'number'='2') as outlier_prenextdis_sample from root.ln.wf01.wt01; -select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1 -select 
M4(s1,'windowSize'='10') from root.vehicle.d1 +select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1; +select M4(s1,'windowSize'='10') from root.vehicle.d1; ``` ### 10.11 Change Points Function @@ -1085,7 +1089,7 @@ select M4(s1,'windowSize'='10') from root.vehicle.d1 For details and examples, see the document [Time-Series](./Operator-and-Expression.md#_2-11-change-points-function). ```sql -select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1 +select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1; ``` ## 11. DATA QUALITY FUNCTION LIBRARY @@ -1098,23 +1102,23 @@ For details and examples, see the document [Data-Quality](../SQL-Manual/UDF-Libr ```sql # Completeness -select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; # Consistency -select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; # Timeliness -select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; # Validity -select Validity(s1) from root.test.d1 where 
time <= 2020-01-01 00:00:30 -select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; # Accuracy -select Accuracy(t1,t2,t3,m1,m2,m3) from root.test +select Accuracy(t1,t2,t3,m1,m2,m3) from root.test; ``` ### 11.2 Data Profiling @@ -1123,78 +1127,78 @@ For details and examples, see the document [Data-Profiling](../SQL-Manual/UDF-Li ```sql # ACF -select acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05 +select acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05; # Distinct -select distinct(s2) from root.test.d2 +select distinct(s2) from root.test.d2; # Histogram -select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1 +select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1; # Integral -select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 -select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10 +select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; +select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10; # IntegralAvg -select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 +select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; # Mad -select mad(s0) from root.test -select mad(s0, "error"="0.01") from root.test +select mad(s0) from root.test; +select mad(s0, "error"="0.01") from root.test; # Median -select median(s0, "error"="0.01") from root.test +select median(s0, "error"="0.01") from root.test; # MinMax -select minmax(s1) from root.test +select minmax(s1) from root.test; # Mode -select mode(s2) from root.test.d2 +select mode(s2) from root.test.d2; # MvAvg -select mvavg(s1, "window"="3") from root.test +select mvavg(s1, "window"="3") from root.test; # PACF -select pacf(s1, "lag"="5") from 
root.test +select pacf(s1, "lag"="5") from root.test; # Percentile -select percentile(s0, "rank"="0.2", "error"="0.01") from root.test +select percentile(s0, "rank"="0.2", "error"="0.01") from root.test; # Quantile -select quantile(s0, "rank"="0.2", "K"="800") from root.test +select quantile(s0, "rank"="0.2", "K"="800") from root.test; # Period -select period(s1) from root.test.d3 +select period(s1) from root.test.d3; # QLB -select QLB(s1) from root.test.d1 +select QLB(s1) from root.test.d1; # Resample -select resample(s1,'every'='5m','interp'='linear') from root.test.d1 -select resample(s1,'every'='30m','aggr'='first') from root.test.d1 -select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1 +select resample(s1,'every'='5m','interp'='linear') from root.test.d1; +select resample(s1,'every'='30m','aggr'='first') from root.test.d1; +select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1; # Sample -select sample(s1,'method'='reservoir','k'='5') from root.test.d1 -select sample(s1,'method'='isometric','k'='5') from root.test.d1 +select sample(s1,'method'='reservoir','k'='5') from root.test.d1; +select sample(s1,'method'='isometric','k'='5') from root.test.d1; # Segment -select segment(s1, "error"="0.1") from root.test +select segment(s1, "error"="0.1") from root.test; # Skew -select skew(s1) from root.test.d1 +select skew(s1) from root.test.d1; # Spline -select spline(s1, "points"="151") from root.test +select spline(s1, "points"="151") from root.test; # Spread -select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 +select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; # Stddev -select stddev(s1) from root.test.d1 +select stddev(s1) from root.test.d1; # ZScore -select zscore(s1) from root.test +select zscore(s1) from root.test; ``` ### 11.3 Anomaly Detection @@ -1203,33 +1207,33 @@ For details and examples, see the document [Anomaly-Detection](../SQL-Manual/UDF ```sql # IQR -select 
iqr(s1) from root.test +select iqr(s1) from root.test; # KSigma -select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30; # LOF -select lof(s1,s2) from root.test.d1 where time<1000 -select lof(s1, "method"="series") from root.test.d1 where time<1000 +select lof(s1,s2) from root.test.d1 where time<1000; +select lof(s1, "method"="series") from root.test.d1 where time<1000; # MissDetect -select missdetect(s2,'minlen'='10') from root.test.d2 +select missdetect(s2,'minlen'='10') from root.test.d2; # Range -select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30; # TwoSidedFilter -select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test +select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test; # Outlier -select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test +select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test; # MasterTrain -select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test +select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test; # MasterDetect -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test; +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test; ``` ### 11.4 Frequency Domain @@ -1238,30 +1242,30 @@ For details and examples, see the document [Frequency-Domain](../SQL-Manual/UDF- ```sql # Conv -select conv(s1,s2) from root.test.d2 +select conv(s1,s2) from root.test.d2; # 
Deconv -select deconv(s3,s2) from root.test.d2 -select deconv(s3,s2,'result'='remainder') from root.test.d2 +select deconv(s3,s2) from root.test.d2; +select deconv(s3,s2,'result'='remainder') from root.test.d2; # DWT -select dwt(s1,"method"="haar") from root.test.d1 +select dwt(s1,"method"="haar") from root.test.d1; # FFT -select fft(s1) from root.test.d1 -select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1 +select fft(s1) from root.test.d1; +select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1; # HighPass -select highpass(s1,'wpass'='0.45') from root.test.d1 +select highpass(s1,'wpass'='0.45') from root.test.d1; # IFFT -select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1 +select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1; # LowPass -select lowpass(s1,'wpass'='0.45') from root.test.d1 +select lowpass(s1,'wpass'='0.45') from root.test.d1; # Envelope -select envelope(s1) from root.test.d1 +select envelope(s1) from root.test.d1; ``` ### 11.5 Data Matching @@ -1270,19 +1274,19 @@ For details and examples, see the document [Data-Matching](../SQL-Manual/UDF-Lib ```sql # Cov -select cov(s1,s2) from root.test.d2 +select cov(s1,s2) from root.test.d2; # DTW -select dtw(s1,s2) from root.test.d2 +select dtw(s1,s2) from root.test.d2; # Pearson -select pearson(s1,s2) from root.test.d2 +select pearson(s1,s2) from root.test.d2; # PtnSym -select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1 +select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1; # XCorr -select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 +select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05; ``` ### 11.6 Data Repairing @@ -1291,23 +1295,23 @@ For details and examples, see the document [Data-Repairing](../SQL-Manual/UDF-Li ```sql # TimestampRepair -select 
timestamprepair(s1,'interval'='10000') from root.test.d2 -select timestamprepair(s1) from root.test.d2 +select timestamprepair(s1,'interval'='10000') from root.test.d2; +select timestamprepair(s1) from root.test.d2; # ValueFill -select valuefill(s1) from root.test.d2 -select valuefill(s1,"method"="previous") from root.test.d2 +select valuefill(s1) from root.test.d2; +select valuefill(s1,"method"="previous") from root.test.d2; # ValueRepair -select valuerepair(s1) from root.test.d2 -select valuerepair(s1,'method'='LsGreedy') from root.test.d2 +select valuerepair(s1) from root.test.d2; +select valuerepair(s1,'method'='LsGreedy') from root.test.d2; # MasterRepair -select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test +select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test; # SeasonalRepair -select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2 -select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 +select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2; +select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2; ``` ### 11.7 Series Discovery @@ -1316,11 +1320,11 @@ For details and examples, see the document [Series-Discovery](../SQL-Manual/UDF- ```sql # ConsecutiveSequences -select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1 -select consecutivesequences(s1,s2) from root.test.d1 +select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1; +select consecutivesequences(s1,s2) from root.test.d1; # ConsecutiveWindows -select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 +select consecutivewindows(s1,s2,'length'='10m') from root.test.d1; ``` ### 11.8 Machine Learning @@ -1329,13 +1333,13 @@ For details and examples, see the document [Machine-Learning](../SQL-Manual/UDF- ```sql # AR -select ar(s0,"p"="2") from root.test.d0 +select ar(s0,"p"="2") from root.test.d0; # Representation -select representation(s0,"tb"="3","vb"="2") from root.test.d0 +select representation(s0,"tb"="3","vb"="2") from 
root.test.d0; # RM -select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0 +select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0; ``` ## 12. CONDITIONAL EXPRESSION @@ -1348,24 +1352,24 @@ when 1000=1050 then "bad temperature" when P<=1000000 or P>=1100000 then "bad pressure" end as `result` -from root.test1 +from root.test1; select str, case when str like "%cc%" then "has cc" when str like "%dd%" then "has dd" else "no cc and dd" end as `result` -from root.test2 +from root.test2; select count(case when x<=1 then 1 end) as `(-∞,1]`, count(case when 1 +DROP (CONTINUOUS QUERY | CQ) ; ``` #### Altering continuous queries @@ -1557,9 +1561,9 @@ DROP FUNCTION ### 15.3 UDF Queries ```sql -SELECT example(*) from root.sg.d1 -SELECT example(s1, *) from root.sg.d1 -SELECT example(*, *) from root.sg.d1 +SELECT example(*) from root.sg.d1; +SELECT example(s1, *) from root.sg.d1; +SELECT example(*, *) from root.sg.d1; SELECT example(s1, 'key1'='value1', 'key2'='value2'), example(*, 'key3'='value3') FROM root.sg.d1; SELECT example(s1, s2, 'key1'='value1', 'key2'='value2') FROM root.sg.d1; @@ -1585,43 +1589,43 @@ For more details, see document [Authority Management](../User-Manual/Authority-M - Create user (Requires MANAGE_USER permission) ```SQL -CREATE USER -eg: CREATE USER user1 'passwd' +CREATE USER ; +eg: CREATE USER user1 'passwd'; ``` - Delete user (Requires MANAGE_USER permission) ```sql -DROP USER -eg: DROP USER user1 +DROP USER ; +eg: DROP USER user1; ``` - Create role (Requires MANAGE_ROLE permission) ```sql -CREATE ROLE -eg: CREATE ROLE role1 +CREATE ROLE ; +eg: CREATE ROLE role1; ``` - Delete role (Requires MANAGE_ROLE permission) ```sql -DROP ROLE -eg: DROP ROLE role1 +DROP ROLE ; +eg: DROP ROLE role1; ``` - Grant role to user (Requires MANAGE_ROLE permission) ```sql -GRANT ROLE TO -eg: GRANT ROLE admin TO user1 +GRANT ROLE TO ; +eg: GRANT ROLE admin TO user1; ``` - Revoke role from user(Requires MANAGE_ROLE permission) ```sql -REVOKE ROLE FROM -eg: REVOKE ROLE admin 
FROM user1 +REVOKE ROLE FROM ; +eg: REVOKE ROLE admin FROM user1; ``` - List all user (Requires MANAGE_USER permission) @@ -1639,15 +1643,15 @@ LIST ROLE - List all users granted specific role.(Requires MANAGE_USER permission) ```sql -LIST USER OF ROLE -eg: LIST USER OF ROLE roleuser +LIST USER OF ROLE ; +eg: LIST USER OF ROLE roleuser; ``` - List all role granted to specific user. ```sql -LIST ROLE OF USER -eg: LIST ROLE OF USER tempuser +LIST ROLE OF USER ; +eg: LIST ROLE OF USER tempuser; ``` - List all privileges of user @@ -1696,13 +1700,13 @@ eg: REVOKE ALL ON root.** FROM USER user1; #### Delete Time Partition (experimental) ```sql -Eg: IoTDB > DELETE PARTITION root.ln 0,1,2 +Eg: DELETE PARTITION root.ln 0,1,2; ``` #### Continuous Query,CQ ```sql -Eg: IoTDB > CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO temperature_max FROM root.ln.*.* GROUP BY time(10s) END +Eg: CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO temperature_max FROM root.ln.*.* GROUP BY time(10s) END; ``` #### Maintenance Command @@ -1710,42 +1714,42 @@ Eg: IoTDB > CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO - FLUSH ```sql -Eg: IoTDB > flush +Eg: flush ``` - MERGE ```sql -Eg: IoTDB > MERGE -Eg: IoTDB > FULL MERGE +Eg: MERGE; +Eg: FULL MERGE; ``` - CLEAR CACHE ```sql -Eg: IoTDB > CLEAR CACHE +Eg: CLEAR CACHE ``` - START REPAIR DATA ```sql -Eg: IoTDB > START REPAIR DATA +Eg: START REPAIR DATA ``` - STOP REPAIR DATA ```sql -Eg: IoTDB > STOP REPAIR DATA +Eg: STOP REPAIR DATA ``` - SET SYSTEM TO READONLY / WRITABLE ```sql -Eg: IoTDB > SET SYSTEM TO READONLY / WRITABLE +Eg: SET SYSTEM TO READONLY / WRITABLE ``` - Query abort ```sql -Eg: IoTDB > KILL QUERY 1 +Eg: KILL QUERY 1 ``` \ No newline at end of file diff --git a/src/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md b/src/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md index 9683a27ad..9316c895d 100644 --- a/src/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md +++ 
b/src/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md @@ -28,33 +28,33 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad ### 1.1 Create Database ```sql -IoTDB > create database root.ln -IoTDB > create database root.sgcc +create database root.ln; +create database root.sgcc; ``` ### 1.2 Show Databases ```sql -IoTDB> SHOW DATABASES -IoTDB> SHOW DATABASES root.** +SHOW DATABASES; +SHOW DATABASES root.**; ``` ### 1.3 Delete Database ```sql -IoTDB > DELETE DATABASE root.ln -IoTDB > DELETE DATABASE root.sgcc -// delete all data, all timeseries and all databases -IoTDB > DELETE DATABASE root.** +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +// delete all data, all timeseries and all databases; +DELETE DATABASE root.**; ``` ### 1.4 Count Databases ```sql -IoTDB> count databases -IoTDB> count databases root.* -IoTDB> count databases root.sgcc.* -IoTDB> count databases root.sgcc +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` ### 1.5 Setting up heterogeneous databases (Advanced operations) @@ -74,7 +74,7 @@ ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; #### Show heterogeneous databases ```sql -SHOW DATABASES DETAILS +SHOW DATABASES DETAILS; ``` ### 1.6 TTL @@ -82,25 +82,25 @@ SHOW DATABASES DETAILS #### Set TTL ```sql -IoTDB> set ttl to root.ln 3600000 -IoTDB> set ttl to root.sgcc.** 3600000 -IoTDB> set ttl to root.** 3600000 +set ttl to root.ln 3600000; +set ttl to root.sgcc.** 3600000; +set ttl to root.** 3600000; ``` #### Unset TTL ```sql -IoTDB> unset ttl from root.ln -IoTDB> unset ttl from root.sgcc.** -IoTDB> unset ttl from root.** +unset ttl from root.ln; +unset ttl from root.sgcc.**; +unset ttl from root.**; ``` #### Show TTL ```sql -IoTDB> SHOW ALL TTL -IoTDB> SHOW TTL ON StorageGroupNames -IoTDB> SHOW DEVICES +SHOW ALL TTL; +SHOW TTL ON StorageGroupNames; +SHOW DEVICES; ``` ## 2. 
TIMESERIES MANAGEMENT @@ -110,108 +110,108 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad ### 2.1 Create Timeseries ```sql -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` - From v0.13, you can use a simplified version of the SQL statements to create timeseries: ```sql -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` - 
Notice that when in the CREATE TIMESERIES statement the encoding method conflicts with the data type, the system gives the corresponding error prompt as shown below: ```sql -IoTDB > create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN; error: encoding TS_2DIFF does not support BOOLEAN ``` ### 2.2 Create Aligned Timeseries ```sql -IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT , longitude FLOAT) +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT , longitude FLOAT); ``` ### 2.3 Delete Timeseries ```sql -IoTDB> delete timeseries root.ln.wf01.wt01.status -IoTDB> delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -IoTDB> delete timeseries root.ln.wf02.* -IoTDB> drop timeseries root.ln.wf02.* +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` ### 2.4 Show Timeseries ```sql -IoTDB> show timeseries root.** -IoTDB> show timeseries root.ln.** -IoTDB> show timeseries root.ln.** limit 10 offset 10 -IoTDB> show timeseries root.ln.** where timeseries contains 'wf01.wt' -IoTDB> show timeseries root.ln.** where dataType=FLOAT +show timeseries root.**; +show timeseries root.ln.**; +show timeseries root.ln.** limit 10 offset 10; +show timeseries root.ln.** where timeseries contains 'wf01.wt'; +show timeseries root.ln.** where dataType=FLOAT; ``` ### 2.5 Count Timeseries ```sql -IoTDB > COUNT TIMESERIES root.** -IoTDB > COUNT TIMESERIES root.ln.** -IoTDB > COUNT TIMESERIES root.ln.*.*.status -IoTDB > COUNT TIMESERIES root.ln.wf01.wt01.status -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -IoTDB > COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -IoTDB > COUNT TIMESERIES root.** 
WHERE TIMESERIES contains 'sgcc' group by level = 1 -IoTDB > COUNT TIMESERIES root.** GROUP BY LEVEL=1 -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc'; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c'; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c'; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` ### 2.6 Tag and Attribute Management ```sql -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` * Rename the tag/attribute key ```SQL -ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1; ``` * Reset the tag/attribute value ```SQL -ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1; ``` * Delete the existing tag/attribute ```SQL -ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2; ``` * Add new tags ```SQL -ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4; ``` * Add new attributes ```SQL -ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4; ``` * Upsert alias, tags and attributes @@ -219,49 +219,51 @@ ALTER timeseries 
root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 > add alias or a new key-value if the alias or key doesn't exist, otherwise, update the old one with new value. ```SQL -ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4) +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4); ``` * Show timeseries using tags. Use TAGS(tagKey) to identify the tags used as filter key ```SQL -SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause +SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; ``` returns all the timeseries information that satisfy the where condition and match the pathPattern. SQL statements are as follows: ```SQL -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` - count timeseries using tags ```SQL -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL=; ``` returns all the number of timeseries that satisfy the where condition and match the pathPattern. 
SQL statements are as follows: ```SQL -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` create aligned timeseries ```SQL -create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) +create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)); ``` The execution result is as follows: ```SQL -IoTDB> show timeseries +show timeseries; +``` +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -273,7 +275,9 @@ IoTDB> show timeseries Support query: ```SQL -IoTDB> show timeseries where TAGS(tag1)='v1' +show timeseries where TAGS(tag1)='v1'; +``` +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -290,40 +294,40 @@ For more details, see document [Operate-Metadata](../Basic-Concept/Operate-Metad ### 3.1 Show Child Paths ```SQL -SHOW CHILD PATHS pathPattern +SHOW CHILD PATHS pathPattern; ``` ### 3.2 Show Child Nodes ```SQL -SHOW 
CHILD NODES pathPattern +SHOW CHILD NODES pathPattern; ``` ### 3.3 Count Nodes ```SQL -IoTDB > COUNT NODES root.** LEVEL=2 -IoTDB > COUNT NODES root.ln.** LEVEL=2 -IoTDB > COUNT NODES root.ln.wf01.** LEVEL=3 -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.** LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` ### 3.4 Show Devices ```SQL -IoTDB> show devices -IoTDB> show devices root.ln.** -IoTDB> show devices root.ln.** where device contains 't' -IoTDB> show devices with database -IoTDB> show devices root.ln.** with database +show devices; +show devices root.ln.**; +show devices root.ln.** where device contains 't'; +show devices with database; +show devices root.ln.** with database; ``` ### 3.5 Count Devices ```SQL -IoTDB> show devices -IoTDB> count devices -IoTDB> count devices root.ln.** +show devices; +count devices; +count devices root.ln.**; ``` ## 4. INSERT & LOAD DATA @@ -337,30 +341,30 @@ For more details, see document [Write-Data](../Basic-Concept/Write-Data_timecho) - Insert Single Timeseries ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp,status) values(1,true) -IoTDB > insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1') +insert into root.ln.wf02.wt02(timestamp,status) values(1,true); +insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1'); ``` - Insert Multiple Timeseries ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (2, false, 'v2') -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4') +insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (2, false, 'v2'); +insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4'); ``` - Use the Current System Timestamp as the Timestamp of the Data Point ```SQL -IoTDB > insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2') +insert into 
root.ln.wf02.wt02(status, hardware) values (false, 'v2'); ``` #### Insert Data Into Aligned Timeseries ```SQL -IoTDB > create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE) -IoTDB > insert into root.sg1.d1(time, s1, s2) aligned values(1, 1, 1) -IoTDB > insert into root.sg1.d1(time, s1, s2) aligned values(2, 2, 2), (3, 3, 3) -IoTDB > select * from root.sg1.d1 +create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE); +insert into root.sg1.d1(time, s1, s2) aligned values(1, 1, 1); +insert into root.sg1.d1(time, s1, s2) aligned values(2, 2, 2), (3, 3, 3); +select * from root.sg1.d1; ``` ### 4.2 Load External TsFile Tool @@ -397,34 +401,34 @@ For more details, see document [Write-Delete-Data](../Basic-Concept/Write-Data_t ### 5.1 Delete Single Timeseries ```sql -IoTDB > delete from root.ln.wf02.wt02.status where time<=2017-11-01T16:26:00; -IoTDB > delete from root.ln.wf02.wt02.status where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -IoTDB > delete from root.ln.wf02.wt02.status where time < 10 -IoTDB > delete from root.ln.wf02.wt02.status where time <= 10 -IoTDB > delete from root.ln.wf02.wt02.status where time < 20 and time > 10 -IoTDB > delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10 -IoTDB > delete from root.ln.wf02.wt02.status where time > 20 -IoTDB > delete from root.ln.wf02.wt02.status where time >= 20 -IoTDB > delete from root.ln.wf02.wt02.status where time = 20 -IoTDB > delete from root.ln.wf02.wt02.status where time > 4 or time < 0 -Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic -expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' -IoTDB > delete from root.ln.wf02.wt02.status +delete from root.ln.wf02.wt02.status where time<=2017-11-01T16:26:00; +delete from root.ln.wf02.wt02.status where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; +delete from root.ln.wf02.wt02.status where time < 10; +delete from root.ln.wf02.wt02.status 
where time <= 10; +delete from root.ln.wf02.wt02.status where time < 20 and time > 10; +delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10; +delete from root.ln.wf02.wt02.status where time > 20; +delete from root.ln.wf02.wt02.status where time >= 20; +delete from root.ln.wf02.wt02.status where time = 20; +delete from root.ln.wf02.wt02.status where time > 4 or time < 0; +Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic; +expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND'; +delete from root.ln.wf02.wt02.status; ``` ### 5.2 Delete Multiple Timeseries ```sql -IoTDB > delete from root.ln.wf02.wt02 where time <= 2017-11-01T16:26:00; -IoTDB > delete from root.ln.wf02.wt02.* where time <= 2017-11-01T16:26:00; -IoTDB> delete from root.ln.wf03.wt02.status where time < now() +delete from root.ln.wf02.wt02 where time <= 2017-11-01T16:26:00; +delete from root.ln.wf02.wt02.* where time <= 2017-11-01T16:26:00; +delete from root.ln.wf03.wt02.status where time < now(); Msg: The statement is executed successfully. ``` ### 5.3 Delete Time Partition (experimental) ```sql -IoTDB > DELETE PARTITION root.ln 0,1,2 +DELETE PARTITION root.ln 0,1,2; ``` ## 6. QUERY DATA @@ -458,31 +462,31 @@ SELECT [LAST] selectExpr [, selectExpr] ... 
#### Select a Column of Data Based on a Time Interval ```sql -IoTDB > select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 +select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000; ``` #### Select Multiple Columns of Data Based on a Time Interval ```sql -IoTDB > select status, temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; +select status, temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; ``` #### Select Multiple Columns of Data for the Same Device According to Multiple Time Intervals ```sql -IoTDB > select status,temperature from root.ln.wf01.wt01 where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); +select status,temperature from root.ln.wf01.wt01 where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); ``` #### Choose Multiple Columns of Data for Different Devices According to Multiple Time Intervals ```sql -IoTDB > select wf01.wt01.status,wf02.wt02.hardware from root.ln where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); +select wf01.wt01.status,wf02.wt02.hardware from root.ln where (time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000) or (time >= 2017-11-01T16:35:00.000 and time <= 2017-11-01T16:37:00.000); ``` #### Order By Time Query ```sql -IoTDB > select * from root.ln.** where time > 1 order by time desc limit 10; +select * from root.ln.** where time > 1 order by time desc limit 10; ``` ### 6.2 `SELECT` CLAUSE @@ -490,7 +494,7 @@ IoTDB > select * from root.ln.** where time > 1 order by time desc limit 10; #### Use Alias ```sql -IoTDB > select s1 as temperature, s2 as speed from root.ln.wf01.wt01; 
+select s1 as temperature, s2 as speed from root.ln.wf01.wt01; ``` #### Nested Expressions @@ -498,35 +502,35 @@ IoTDB > select s1 as temperature, s2 as speed from root.ln.wf01.wt01; ##### Nested Expressions with Time Series Query ```sql -IoTDB > select a, +select a, b, ((a + 1) * 2 - 1) % 2 + 1.5, sin(a + sin(a + sin(b))), -(a + b) * (sin(a + b) * sin(a + b) + cos(a + b) * cos(a + b)) + 1 from root.sg1; -IoTDB > select (a + b) * 2 + sin(a) from root.sg +select (a + b) * 2 + sin(a) from root.sg; -IoTDB > select (a + *) / 2 from root.sg1 +select (a + *) / 2 from root.sg1; -IoTDB > select (a + b) * 3 from root.sg, root.ln +select (a + b) * 3 from root.sg, root.ln; ``` ##### Nested Expressions query with aggregations ```sql -IoTDB > select avg(temperature), +select avg(temperature), sin(avg(temperature)), avg(temperature) + 1, -sum(hardware), avg(temperature) + sum(hardware) from root.ln.wf01.wt01; -IoTDB > select avg(*), +select avg(*), (avg(*) + 1) * 3 / 2 -1 -from root.sg1 +from root.sg1; -IoTDB > select avg(temperature), +select avg(temperature), sin(avg(temperature)), avg(temperature) + 1, -sum(hardware), @@ -538,10 +542,10 @@ GROUP BY([10, 90), 10ms); #### Last Query ```sql -IoTDB > select last status from root.ln.wf01.wt01 -IoTDB > select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 -IoTDB > select last * from root.ln.wf01.wt01 order by timeseries desc; -IoTDB > select last * from root.ln.wf01.wt01 order by dataType desc; +select last status from root.ln.wf01.wt01; +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; +select last * from root.ln.wf01.wt01 order by timeseries desc; +select last * from root.ln.wf01.wt01 order by dataType desc; ``` ### 6.3 `WHERE` CLAUSE @@ -549,22 +553,22 @@ IoTDB > select last * from root.ln.wf01.wt01 order by dataType desc; #### Time Filter ```sql -IoTDB > select s1 from root.sg1.d1 where time > 2022-01-01T00:05:00.000; -IoTDB > select s1 from root.sg1.d1 
where time = 2022-01-01T00:05:00.000; -IoTDB > select s1 from root.sg1.d1 where time >= 2022-01-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; +select s1 from root.sg1.d1 where time > 2022-01-01T00:05:00.000; +select s1 from root.sg1.d1 where time = 2022-01-01T00:05:00.000; +select s1 from root.sg1.d1 where time >= 2022-01-01T00:05:00.000 and time < 2017-11-01T00:12:00.000; ``` #### Value Filter ```sql -IoTDB > select temperature from root.sg1.d1 where temperature > 36.5; -IoTDB > select status from root.sg1.d1 where status = true; -IoTDB > select temperature from root.sg1.d1 where temperature between 36.5 and 40; -IoTDB > select temperature from root.sg1.d1 where temperature not between 36.5 and 40; -IoTDB > select code from root.sg1.d1 where code in ('200', '300', '400', '500'); -IoTDB > select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); -IoTDB > select code from root.sg1.d1 where temperature is null; -IoTDB > select code from root.sg1.d1 where temperature is not null; +select temperature from root.sg1.d1 where temperature > 36.5; +select status from root.sg1.d1 where status = true; +select temperature from root.sg1.d1 where temperature between 36.5 and 40; +select temperature from root.sg1.d1 where temperature not between 36.5 and 40; +select code from root.sg1.d1 where code in ('200', '300', '400', '500'); +select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); +select code from root.sg1.d1 where temperature is null; +select code from root.sg1.d1 where temperature is not null; ``` #### Fuzzy Query @@ -572,15 +576,15 @@ IoTDB > select code from root.sg1.d1 where temperature is not null; - Fuzzy matching using `Like` ```sql -IoTDB > select * from root.sg.d1 where value like '%cc%' -IoTDB > select * from root.sg.device where value like '_b_' +select * from root.sg.d1 where value like '%cc%'; +select * from root.sg.device where value like '_b_'; ``` - Fuzzy matching using `Regexp` ```sql -IoTDB > select * from 
root.sg.d1 where value regexp '^[A-Za-z]+$' -IoTDB > select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; ``` ### 6.4 `GROUP BY` CLAUSE @@ -588,91 +592,91 @@ IoTDB > select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 - Aggregate By Time without Specifying the Sliding Step Length ```sql -IoTDB > select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d); +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d); ``` - Aggregate By Time Specifying the Sliding Step Length ```sql -IoTDB > select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d); +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d); ``` - Aggregate by Natural Month ```sql -IoTDB > select count(status) from root.ln.wf01.wt01 group by([2017-11-01T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); -IoTDB > select count(status) from root.ln.wf01.wt01 group by([2017-10-31T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); +select count(status) from root.ln.wf01.wt01 group by([2017-11-01T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); +select count(status) from root.ln.wf01.wt01 group by([2017-10-31T00:00:00, 2019-11-07T23:00:00), 1mo, 2mo); ``` - Left Open And Right Close Range ```sql -IoTDB > select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d); +select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d); ``` - Aggregation By Variation ```sql -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6) -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from 
root.sg.d group by variation(s6, ignoreNull=false) -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4) -IoTDB > select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6); +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false); +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4); +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10); ``` - Aggregation By Condition ```sql -IoTDB > select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=true) -IoTDB > select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=false) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=true); +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoringNull=false); ``` - Aggregation By Session ```sql -IoTDB > select __endTime,count(*) from root.** group by session(1d) -IoTDB > select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device +select __endTime,count(*) from root.** group by session(1d); +select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device; ``` - Aggregation By Count ```sql -IoTDB > select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5) -IoTDB > select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false) +select count(charging_stauts), first_value(soc) from 
root.sg group by count(charging_status,5); +select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false); ``` - Aggregation By Level ```sql -IoTDB > select count(status) from root.** group by level = 1 -IoTDB > select count(status) from root.** group by level = 3 -IoTDB > select count(status) from root.** group by level = 1, 3 -IoTDB > select max_value(temperature) from root.** group by level = 0 -IoTDB > select count(*) from root.ln.** group by level = 2 +select count(status) from root.** group by level = 1; +select count(status) from root.** group by level = 3; +select count(status) from root.** group by level = 1, 3; +select max_value(temperature) from root.** group by level = 0; +select count(*) from root.ln.** group by level = 2; ``` - Aggregate By Time with Level Clause ```sql -IoTDB > select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d), level=1; -IoTDB > select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d), level=1; +select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017-11-07T23:00:00],1d), level=1; +select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d), level=1; ``` - Aggregation query by one single tag ```sql -IoTDB > SELECT AVG(temperature) FROM root.factory1.** GROUP BY TAGS(city); +SELECT AVG(temperature) FROM root.factory1.** GROUP BY TAGS(city); ``` - Aggregation query by multiple tags ```sql -IoTDB > SELECT avg(temperature) FROM root.factory1.** GROUP BY TAGS(city, workshop); +SELECT avg(temperature) FROM root.factory1.** GROUP BY TAGS(city, workshop); ``` - Downsampling Aggregation by tags based on Time Window ```sql -IoTDB > SELECT avg(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS(city, workshop); +SELECT avg(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS(city, workshop); 
``` ### 6.5 `HAVING` CLAUSE @@ -680,17 +684,17 @@ IoTDB > SELECT avg(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5 Correct: ```sql -IoTDB > select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 1 -IoTDB > select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device +select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 1; +select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device; ``` Incorrect: ```sql -IoTDB > select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 -IoTDB > select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 -IoTDB > select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 -IoTDB > select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 +select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; +select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; +select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; +select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; ``` ### 6.6 `FILL` CLAUSE @@ -698,7 +702,7 @@ IoTDB > select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having su #### `PREVIOUS` Fill ```sql -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(previous); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(previous); ``` #### `PREVIOUS` FILL and specify the fill timeout threshold @@ -709,14 +713,14 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: #### `LINEAR` Fill ```sql -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(linear); +select 
temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(linear); ``` #### Constant Fill ```sql -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(2.0); -IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(true); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(2.0); +select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16:37:00.000 and time <= 2017-11-01T16:40:00.000 fill(true); ``` ### 6.7 `LIMIT` and `SLIMIT` CLAUSES (PAGINATION) @@ -724,24 +728,24 @@ IoTDB > select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-1 #### Row Control over Query Results ```sql -IoTDB > select status, temperature from root.ln.wf01.wt01 limit 10 -IoTDB > select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 -IoTDB > select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 2 offset 3 -IoTDB > select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 5 offset 3 +select status, temperature from root.ln.wf01.wt01 limit 10; +select status, temperature from root.ln.wf01.wt01 limit 5 offset 3; +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 2 offset 3; +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 5 offset 3; ``` #### Column Control over Query Results ```sql -IoTDB > select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 -IoTDB > select * from root.ln.wf01.wt01 
where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1 -IoTDB > select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1; +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1; +select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1; ``` #### Row and Column Control over Query Results ```sql -IoTDB > select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 +select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0; ``` ### 6.8 `ORDER BY` CLAUSE @@ -749,31 +753,31 @@ IoTDB > select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 #### Order by in ALIGN BY TIME mode ```sql -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time desc; +select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time desc; ``` #### Order by in ALIGN BY DEVICE mode ```sql -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc,time asc align by device; -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,device desc align by device; -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; -IoTDB > select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc,time asc align by device; +select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,device desc align by device; +select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; +select count(*) from root.ln.** group by 
((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` #### Order by arbitrary expressions ```sql -IoTDB > select score from root.** order by score desc align by device -IoTDB > select score,total from root.one order by base+score+bonus desc -IoTDB > select score,total from root.one order by total desc -IoTDB > select base, score, bonus, total from root.** order by total desc NULLS Last, +select score from root.** order by score desc align by device; +select score,total from root.one order by base+score+bonus desc; +select score,total from root.one order by total desc; +select base, score, bonus, total from root.** order by total desc NULLS Last, score desc NULLS Last, bonus desc NULLS Last, - time desc align by device -IoTDB > select min_value(total) from root.** order by min_value(total) asc align by device -IoTDB > select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device -IoTDB > select score from root.** order by device asc, score desc, time asc align by device + time desc align by device; +select min_value(total) from root.** order by min_value(total) asc align by device; +select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device; +select score from root.** order by device asc, score desc, time asc align by device; ``` ### 6.9 `ALIGN BY` CLAUSE @@ -781,54 +785,54 @@ IoTDB > select score from root.** order by device asc, score desc, time asc alig #### Align by Device ```sql -IoTDB > select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; +select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` ### 6.10 `INTO` CLAUSE (QUERY WRITE-BACK) ```sql -IoTDB > select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; -IoTDB > select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from 
root.sg.d1 group by ([0, 100), 10ms); -IoTDB > select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; -IoTDB > select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; ``` - Using variable placeholders: ```sql -IoTDB > select s1, s2 +select s1, s2 into root.sg_copy.d1(::), root.sg_copy.d2(s1), root.sg_copy.d1(${3}), root.sg_copy.d2(::) from root.sg.d1, root.sg.d2; -IoTDB > select d1.s1, d1.s2, d2.s3, d3.s4 +select d1.s1, d1.s2, d2.s3, d3.s4 into ::(s1_1, s2_2), root.sg.d2_2(s3_3), root.${2}_copy.::(s4) from root.sg; -IoTDB > select * into root.sg_bk.::(::) from root.sg.**; +select * into root.sg_bk.::(::) from root.sg.**; -IoTDB > select s1, s2, s3, s4 +select s1, s2, s3, s4 into root.backup_sg.d1(s1, s2, s3, s4), root.backup_sg.d2(::), root.sg.d3(backup_${4}) from root.sg.d1, root.sg.d2, root.sg.d3 align by device; -IoTDB > select avg(s1), sum(s2) + sum(s3), count(s4) +select avg(s1), sum(s2) + sum(s3), count(s4) into root.agg_${2}.::(avg_s1, sum_s2_add_s3, count_s4) from root.** align by device; -IoTDB > select * into ::(backup_${4}) from root.sg.** align by device; +select * into ::(backup_${4}) from root.sg.** align by device; -IoTDB > select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by 
device; ``` ## 7. Maintennance Generate the corresponding query plan: ```sql -explain select s1,s2 from root.sg.d1 +explain select s1,s2 from root.sg.d1; ``` Execute the corresponding SQL, analyze the execution and output: ```sql -explain analyze select s1,s2 from root.sg.d1 order by s1 +explain analyze select s1,s2 from root.sg.d1 order by s1; ``` ## 8. OPERATOR @@ -839,7 +843,7 @@ For more details, see document [Operator-and-Expression](./Operator-and-Expressi For details and examples, see the document [Arithmetic Operators and Functions](./Operator-and-Expression.md#_1-1-arithmetic-operators). ```sql -select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1 +select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1; ``` ### 8.2 Comparison Operators @@ -847,27 +851,27 @@ select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root For details and examples, see the document [Comparison Operators and Functions](./Operator-and-Expression.md#_1-2-comparison-operators). ```sql -# Basic comparison operators +# Basic comparison operators; select a, b, a > 10, a <= b, !(a <= b), a > 10 && a > b from root.test; -# `BETWEEN ... AND ...` operator +# `BETWEEN ... 
AND ...` operator; select temperature from root.sg1.d1 where temperature between 36.5 and 40; select temperature from root.sg1.d1 where temperature not between 36.5 and 40; -# Fuzzy matching operator: Use `Like` for fuzzy matching -select * from root.sg.d1 where value like '%cc%' -select * from root.sg.device where value like '_b_' +# Fuzzy matching operator: Use `Like` for fuzzy matching; +select * from root.sg.d1 where value like '%cc%'; +select * from root.sg.device where value like '_b_'; -# Fuzzy matching operator: Use `Regexp` for fuzzy matching -select * from root.sg.d1 where value regexp '^[A-Za-z]+$' -select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +# Fuzzy matching operator: Use `Regexp` for fuzzy matching; +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; select b, b like '1%', b regexp '[0-2]' from root.test; -# `IS NULL` operator +# `IS NULL` operator; select code from root.sg1.d1 where temperature is null; select code from root.sg1.d1 where temperature is not null; -# `IN` operator +# `IN` operator; select code from root.sg1.d1 where code in ('200', '300', '400', '500'); select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); select a, a in (1, 2) from root.test; @@ -923,25 +927,25 @@ For details and examples, see the document [String Processing](./Operator-and-Ex ```sql select s1, string_contains(s1, 's'='warn') from root.sg1.d4; select s1, string_matches(s1, 'regex'='[^\\s]+37229') from root.sg1.d4; -select s1, length(s1) from root.sg1.d1 -select s1, locate(s1, "target"="1") from root.sg1.d1 -select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1 -select s1, startswith(s1, "target"="1") from root.sg1.d1 -select s1, endswith(s1, "target"="1") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", 
"series_behind"="true") from root.sg1.d1 -select s1, substring(s1 from 1 for 2) from root.sg1.d1 -select s1, replace(s1, 'es', 'tt') from root.sg1.d1 -select s1, upper(s1) from root.sg1.d1 -select s1, lower(s1) from root.sg1.d1 -select s3, trim(s3) from root.sg1.d1 -select s1, s2, strcmp(s1, s2) from root.sg1.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1 -select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1 -select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 +select s1, length(s1) from root.sg1.d1; +select s1, locate(s1, "target"="1") from root.sg1.d1; +select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1; +select s1, startswith(s1, "target"="1") from root.sg1.d1; +select s1, endswith(s1, "target"="1") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", "series_behind"="true") from root.sg1.d1; +select s1, substring(s1 from 1 for 2) from root.sg1.d1; +select s1, replace(s1, 'es', 'tt') from root.sg1.d1; +select s1, upper(s1) from root.sg1.d1; +select s1, lower(s1) from root.sg1.d1; +select s3, trim(s3) from root.sg1.d1; +select s1, s2, strcmp(s1, s2) from root.sg1.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1; +select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1; +select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1; +select 
regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1; +select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1; ``` ### 9.5 Data Type Conversion Function @@ -949,7 +953,7 @@ select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 For details and examples, see the document [Data Type Conversion Function](./Operator-and-Expression.md#_2-5-data-type-conversion-function). ```sql -SELECT cast(s1 as INT32) from root.sg +SELECT cast(s1 as INT32) from root.sg; ``` ### 9.6 Constant Timeseries Generating Functions @@ -997,8 +1001,8 @@ select equal_size_bucket_agg_sample(temperature, 'type'='avg','proportion'='0.1' select equal_size_bucket_m4_sample(temperature, 'proportion'='0.1') as M4_sample from root.ln.wf01.wt01; select equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='avg', 'number'='2') as outlier_avg_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='stendis', 'number'='2') as outlier_stendis_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='cos', 'number'='2') as outlier_cos_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='prenextdis', 'number'='2') as outlier_prenextdis_sample from root.ln.wf01.wt01; -select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1 -select M4(s1,'windowSize'='10') from root.vehicle.d1 +select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1; +select M4(s1,'windowSize'='10') from root.vehicle.d1; ``` ### 9.11 Change Points Function @@ -1006,7 +1010,7 @@ select M4(s1,'windowSize'='10') from root.vehicle.d1 For details and examples, see the document [Time-Series](./Operator-and-Expression.md#_2-11-change-points-function). 
```sql -select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1 +select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1; ``` ## 10. DATA QUALITY FUNCTION LIBRARY @@ -1018,24 +1022,24 @@ For more details, see document [Operator-and-Expression](../SQL-Manual/UDF-Libra For details and examples, see the document [Data-Quality](../SQL-Manual/UDF-Libraries.md#data-quality). ```sql -# Completeness -select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Completeness; +select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Consistency -select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Consistency; +select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Timeliness -select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Timeliness; +select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Validity -select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Validity; +select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 
00:01:00; -# Accuracy -select Accuracy(t1,t2,t3,m1,m2,m3) from root.test +# Accuracy; +select Accuracy(t1,t2,t3,m1,m2,m3) from root.test; ``` ### 10.2 Data Profiling @@ -1043,79 +1047,79 @@ select Accuracy(t1,t2,t3,m1,m2,m3) from root.test For details and examples, see the document [Data-Profiling](../SQL-Manual/UDF-Libraries.md#data-profiling). ```sql -# ACF -select acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05 +# ACF; +select acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05; -# Distinct -select distinct(s2) from root.test.d2 +# Distinct; +select distinct(s2) from root.test.d2; -# Histogram -select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1 +# Histogram; +select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1; -# Integral -select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 -select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10 +# Integral; +select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; +select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10; -# IntegralAvg -select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 +# IntegralAvg; +select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; -# Mad -select mad(s0) from root.test -select mad(s0, "error"="0.01") from root.test +# Mad; +select mad(s0) from root.test; +select mad(s0, "error"="0.01") from root.test; -# Median -select median(s0, "error"="0.01") from root.test +# Median; +select median(s0, "error"="0.01") from root.test; -# MinMax -select minmax(s1) from root.test +# MinMax; +select minmax(s1) from root.test; -# Mode -select mode(s2) from root.test.d2 +# Mode; +select mode(s2) from root.test.d2; -# MvAvg -select mvavg(s1, "window"="3") from root.test +# MvAvg; +select mvavg(s1, "window"="3") from root.test; -# PACF -select pacf(s1, "lag"="5") from root.test +# PACF; +select pacf(s1, "lag"="5") from 
root.test; -# Percentile -select percentile(s0, "rank"="0.2", "error"="0.01") from root.test +# Percentile; +select percentile(s0, "rank"="0.2", "error"="0.01") from root.test; -# Quantile -select quantile(s0, "rank"="0.2", "K"="800") from root.test +# Quantile; +select quantile(s0, "rank"="0.2", "K"="800") from root.test; -# Period -select period(s1) from root.test.d3 +# Period; +select period(s1) from root.test.d3; -# QLB -select QLB(s1) from root.test.d1 +# QLB; +select QLB(s1) from root.test.d1; -# Resample -select resample(s1,'every'='5m','interp'='linear') from root.test.d1 -select resample(s1,'every'='30m','aggr'='first') from root.test.d1 -select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1 +# Resample; +select resample(s1,'every'='5m','interp'='linear') from root.test.d1; +select resample(s1,'every'='30m','aggr'='first') from root.test.d1; +select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1; -# Sample -select sample(s1,'method'='reservoir','k'='5') from root.test.d1 -select sample(s1,'method'='isometric','k'='5') from root.test.d1 +# Sample; +select sample(s1,'method'='reservoir','k'='5') from root.test.d1; +select sample(s1,'method'='isometric','k'='5') from root.test.d1; -# Segment -select segment(s1, "error"="0.1") from root.test +# Segment; +select segment(s1, "error"="0.1") from root.test; -# Skew -select skew(s1) from root.test.d1 +# Skew; +select skew(s1) from root.test.d1; -# Spline -select spline(s1, "points"="151") from root.test +# Spline; +select spline(s1, "points"="151") from root.test; -# Spread -select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 +# Spread; +select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; -# Stddev -select stddev(s1) from root.test.d1 +# Stddev; +select stddev(s1) from root.test.d1; -# ZScore -select zscore(s1) from root.test +# ZScore; +select zscore(s1) from root.test; ``` ### 10.3 Anomaly Detection @@ -1123,34 +1127,34 
@@ select zscore(s1) from root.test For details and examples, see the document [Anomaly-Detection](../SQL-Manual/UDF-Libraries.md#anomaly-detection). ```sql -# IQR -select iqr(s1) from root.test +# IQR; +select iqr(s1) from root.test; -# KSigma -select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +# KSigma; +select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30; -# LOF -select lof(s1,s2) from root.test.d1 where time<1000 -select lof(s1, "method"="series") from root.test.d1 where time<1000 +# LOF; +select lof(s1,s2) from root.test.d1 where time<1000; +select lof(s1, "method"="series") from root.test.d1 where time<1000; -# MissDetect -select missdetect(s2,'minlen'='10') from root.test.d2 +# MissDetect; +select missdetect(s2,'minlen'='10') from root.test.d2; -# Range -select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +# Range; +select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30; -# TwoSidedFilter -select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test +# TwoSidedFilter; +select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test; -# Outlier -select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test +# Outlier; +select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test; -# MasterTrain -select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test +# MasterTrain; +select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test; -# MasterDetect -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test +# MasterDetect; +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test; +select 
MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test; ``` ### 10.4 Frequency Domain @@ -1158,31 +1162,31 @@ select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3 For details and examples, see the document [Frequency-Domain](../SQL-Manual/UDF-Libraries.md#frequency-domain-analysis). ```sql -# Conv -select conv(s1,s2) from root.test.d2 +# Conv; +select conv(s1,s2) from root.test.d2; -# Deconv -select deconv(s3,s2) from root.test.d2 -select deconv(s3,s2,'result'='remainder') from root.test.d2 +# Deconv; +select deconv(s3,s2) from root.test.d2; +select deconv(s3,s2,'result'='remainder') from root.test.d2; -# DWT -select dwt(s1,"method"="haar") from root.test.d1 +# DWT; +select dwt(s1,"method"="haar") from root.test.d1; -# FFT -select fft(s1) from root.test.d1 -select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1 +# FFT; +select fft(s1) from root.test.d1; +select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1; -# HighPass -select highpass(s1,'wpass'='0.45') from root.test.d1 +# HighPass; +select highpass(s1,'wpass'='0.45') from root.test.d1; -# IFFT -select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1 +# IFFT; +select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1; -# LowPass -select lowpass(s1,'wpass'='0.45') from root.test.d1 +# LowPass; +select lowpass(s1,'wpass'='0.45') from root.test.d1; -# Envelope -select envelope(s1) from root.test.d1 +# Envelope; +select envelope(s1) from root.test.d1; ``` ### 10.5 Data Matching @@ -1190,20 +1194,20 @@ select envelope(s1) from root.test.d1 For details and examples, see the document [Data-Matching](../SQL-Manual/UDF-Libraries.md#data-matching). 
```sql -# Cov -select cov(s1,s2) from root.test.d2 +# Cov; +select cov(s1,s2) from root.test.d2; -# DTW -select dtw(s1,s2) from root.test.d2 +# DTW; +select dtw(s1,s2) from root.test.d2; -# Pearson -select pearson(s1,s2) from root.test.d2 +# Pearson; +select pearson(s1,s2) from root.test.d2; -# PtnSym -select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1 +# PtnSym; +select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1; -# XCorr -select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 +# XCorr; +select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05; ``` ### 10.6 Data Repairing @@ -1211,24 +1215,24 @@ select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 For details and examples, see the document [Data-Repairing](../SQL-Manual/UDF-Libraries.md#data-repairing). ```sql -# TimestampRepair -select timestamprepair(s1,'interval'='10000') from root.test.d2 -select timestamprepair(s1) from root.test.d2 +# TimestampRepair; +select timestamprepair(s1,'interval'='10000') from root.test.d2; +select timestamprepair(s1) from root.test.d2; -# ValueFill -select valuefill(s1) from root.test.d2 -select valuefill(s1,"method"="previous") from root.test.d2 +# ValueFill; +select valuefill(s1) from root.test.d2; +select valuefill(s1,"method"="previous") from root.test.d2; -# ValueRepair -select valuerepair(s1) from root.test.d2 -select valuerepair(s1,'method'='LsGreedy') from root.test.d2 +# ValueRepair; +select valuerepair(s1) from root.test.d2; +select valuerepair(s1,'method'='LsGreedy') from root.test.d2; -# MasterRepair -select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test +# MasterRepair; +select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test; -# SeasonalRepair -select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2 -select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 +# SeasonalRepair; +select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2; +select 
seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2; ``` ### 10.7 Series Discovery @@ -1236,12 +1240,12 @@ select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 For details and examples, see the document [Series-Discovery](../SQL-Manual/UDF-Libraries.md#series-discovery). ```sql -# ConsecutiveSequences -select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1 -select consecutivesequences(s1,s2) from root.test.d1 +# ConsecutiveSequences; +select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1; +select consecutivesequences(s1,s2) from root.test.d1; -# ConsecutiveWindows -select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 +# ConsecutiveWindows; +select consecutivewindows(s1,s2,'length'='10m') from root.test.d1; ``` ### 10.8 Machine Learning @@ -1249,14 +1253,14 @@ select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 For details and examples, see the document [Machine-Learning](../SQL-Manual/UDF-Libraries.md#machine-learning). 
```sql -# AR -select ar(s0,"p"="2") from root.test.d0 +# AR; +select ar(s0,"p"="2") from root.test.d0; -# Representation -select representation(s0,"tb"="3","vb"="2") from root.test.d0 +# Representation; +select representation(s0,"tb"="3","vb"="2") from root.test.d0; -# RM -select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0 +# RM; +select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0; ``` @@ -1270,24 +1274,24 @@ when 1000=1050 then "bad temperature" when P<=1000000 or P>=1100000 then "bad pressure" end as `result` -from root.test1 +from root.test1; select str, case when str like "%cc%" then "has cc" when str like "%dd%" then "has dd" else "no cc and dd" end as `result` -from root.test2 +from root.test2; select count(case when x<=1 then 1 end) as `(-∞,1]`, count(case when 1 ### 14.3 UDF Queries ```sql -SELECT example(*) from root.sg.d1 -SELECT example(s1, *) from root.sg.d1 -SELECT example(*, *) from root.sg.d1 +SELECT example(*) from root.sg.d1; +SELECT example(s1, *) from root.sg.d1; +SELECT example(*, *) from root.sg.d1; SELECT example(s1, 'key1'='value1', 'key2'='value2'), example(*, 'key3'='value3') FROM root.sg.d1; SELECT example(s1, s2, 'key1'='value1', 'key2'='value2') FROM root.sg.d1; @@ -1495,7 +1499,7 @@ SELECT s1, s2, s1 + example(s1, s2), s1 - example(s1 + example(s1, s2) / s2) FRO ### 14.4 Show All Registered UDFs ```sql -SHOW FUNCTIONS +SHOW FUNCTIONS; ``` ## 15. 
ADMINISTRATION MANAGEMENT @@ -1507,69 +1511,69 @@ For more details, see document [Authority Management](../User-Manual/Authority-M - Create user (Requires MANAGE_USER permission) ```SQL -CREATE USER -eg: CREATE USER user1 'passwd' +CREATE USER ; +eg: CREATE USER user1 'passwd'; ``` - Delete user (Requires MANAGE_USER permission) ```sql -DROP USER -eg: DROP USER user1 +DROP USER ; +eg: DROP USER user1; ``` - Create role (Requires MANAGE_ROLE permission) ```sql -CREATE ROLE -eg: CREATE ROLE role1 +CREATE ROLE ; +eg: CREATE ROLE role1; ``` - Delete role (Requires MANAGE_ROLE permission) ```sql -DROP ROLE -eg: DROP ROLE role1 +DROP ROLE ; +eg: DROP ROLE role1; ``` - Grant role to user (Requires MANAGE_ROLE permission) ```sql -GRANT ROLE TO -eg: GRANT ROLE admin TO user1 +GRANT ROLE TO ; +eg: GRANT ROLE admin TO user1; ``` - Revoke role from user(Requires MANAGE_ROLE permission) ```sql -REVOKE ROLE FROM -eg: REVOKE ROLE admin FROM user1 +REVOKE ROLE FROM ; +eg: REVOKE ROLE admin FROM user1; ``` - List all user (Requires MANAGE_USER permission) ```sql -LIST USER +LIST USER; ``` - List all role (Requires MANAGE_ROLE permission) ```sql -LIST ROLE +LIST ROLE; ``` - List all users granted specific role.(Requires MANAGE_USER permission) ```sql -LIST USER OF ROLE -eg: LIST USER OF ROLE roleuser +LIST USER OF ROLE ; +eg: LIST USER OF ROLE roleuser; ``` - List all role granted to specific user. 
```sql -LIST ROLE OF USER -eg: LIST ROLE OF USER tempuser +LIST ROLE OF USER ; +eg: LIST ROLE OF USER tempuser; ``` - List all privileges of user @@ -1618,13 +1622,13 @@ eg: REVOKE ALL ON root.** FROM USER user1; #### Delete Time Partition (experimental) ```sql -Eg: IoTDB > DELETE PARTITION root.ln 0,1,2 +Eg: DELETE PARTITION root.ln 0,1,2; ``` #### Continuous Query,CQ ```sql -Eg: IoTDB > CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO temperature_max FROM root.ln.*.* GROUP BY time(10s) END +Eg: CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO temperature_max FROM root.ln.*.* GROUP BY time(10s) END; ``` #### Maintenance Command @@ -1632,42 +1636,42 @@ Eg: IoTDB > CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO - FLUSH ```sql -Eg: IoTDB > flush +Eg: flush; ``` - MERGE ```sql -Eg: IoTDB > MERGE -Eg: IoTDB > FULL MERGE +Eg: MERGE; +Eg: FULL MERGE; ``` - CLEAR CACHE ```sql -Eg: IoTDB > CLEAR CACHE +Eg: CLEAR CACHE; ``` - START REPAIR DATA ```sql -Eg: IoTDB > START REPAIR DATA +Eg: START REPAIR DATA; ``` - STOP REPAIR DATA ```sql -Eg: IoTDB > STOP REPAIR DATA +Eg: STOP REPAIR DATA; ``` - SET SYSTEM TO READONLY / WRITABLE ```sql -Eg: IoTDB > SET SYSTEM TO READONLY / WRITABLE +Eg: SET SYSTEM TO READONLY / WRITABLE; ``` - Query abort ```sql -Eg: IoTDB > KILL QUERY 1 +Eg: KILL QUERY 1; ``` \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management_apache.md b/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management_apache.md index cff2f64f3..e95978a87 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management_apache.md +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management_apache.md @@ -76,7 +76,7 @@ comment - 注意:SQL中特殊字符或中文表名需加双引号。原生API中无需额外添加,否则表名会包含引号字符。 - 当为表命名时,最外层的双引号(`""`)不会在实际创建的表名中出现。 - - ```SQL + - ```shell -- SQL 中 "a""b" --> a"b """""" --> "" @@ -133,15 +133,20 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? 
**示例:** -```SQL -IoTDB> show tables from database1 +```sql +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -168,7 +173,9 @@ IoTDB> show tables details from database1 **示例:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -183,8 +190,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -220,7 +230,9 @@ SHOW CREATE TABLE **示例:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -237,12 +249,16 @@ Total line number = 1 **语法:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
column=columnDefinition COMMENT 'column_comment' #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment'; +#dropColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; + +COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **说明:** @@ -254,11 +270,11 @@ ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? col **示例:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 1.6 删除表 @@ -268,12 +284,12 @@ COMMENT ON COLUMN table1.a IS null **语法:** ```SQL -DROP TABLE (IF EXISTS)? +DROP TABLE (IF EXISTS)? 
; ``` **示例:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md b/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md index 32294fb21..201124a9f 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md @@ -76,7 +76,7 @@ comment - 注意:SQL中特殊字符或中文表名需加双引号。原生API中无需额外添加,否则表名会包含引号字符。 - 当为表命名时,最外层的双引号(`""`)不会在实际创建的表名中出现。 - - ```SQL + - ```shell -- SQL 中 "a""b" --> a"b """""" --> "" @@ -134,14 +134,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? **示例:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -168,7 +173,9 @@ IoTDB> show tables details from database1 **示例:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -183,8 +190,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -221,7 +231,9 @@ SHOW CREATE TABLE **示例:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell 
+------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -238,12 +250,15 @@ Total line number = 1 **语法:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment' #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment'; +#dropColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **说明:** @@ -255,11 +270,11 @@ ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
col **示例:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 1.6 删除表 @@ -275,6 +290,6 @@ DROP TABLE (IF EXISTS)? **示例:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_apache.md b/src/zh/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_apache.md index 46e8a631f..ce5d55fdc 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_apache.md +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_apache.md @@ -110,7 +110,9 @@ try (ITableSession session = 在代码执行完成后,可以通过下述语句确认表已成功创建,其中包含了时间列、标签列、属性列以及测点列等各类信息。 ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +-----------+---------+-----------+ | ColumnName| DataType| Category| +-----------+---------+-----------+ @@ -131,9 +133,9 @@ IoTDB> desc table1 **示例:** ```SQL -INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('北京', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1) +INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('北京', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('北京', '1001', '100', '2025-11-26 13:38:00', 91.0) +INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('北京', '1001', '100', '2025-11-26 13:38:00', 91.0); ``` ### 1.4 空值写入 @@ -143,10 +145,10 @@ INSERT INTO 
table1(region, plant_id, device_id, time, temperature) VALUES ('北 **示例(与上述示例等价):** ```SQL -# 上述部分列写入等价于如下的带空值写入 -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1) +# 上述部分列写入等价于如下的带空值写入; +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null) +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null); ``` 当向不包含任何标签列的表中写入数据时,系统将默认创建一个所有标签列值均为 null 的device。 @@ -163,13 +165,13 @@ INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, tem INSERT INTO table1 VALUES ('2025-11-26 13:37:00', '北京', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('2025-11-26 13:38:00', '北京', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25') +('2025-11-26 13:38:00', '北京', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25'); INSERT INTO table1 (region, plant_id, device_id, model_id, maintenance, time, temperature, humidity, status, arrival_time) VALUES ('北京', '1001', '100', 'A', '180', '2025-11-26 13:37:00', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('北京', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25') +('北京', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25'); ``` #### 注意事项 @@ -194,7 +196,7 @@ INSERT INTO table_name [ ( column [, ... 
] ) ] query 以[示例数据](../Reference/Sample-Data.md)为源数据,先创建目标表 ```SQL -IoTDB:database1> CREATE TABLE target_table ( time TIMESTAMP TIME, region STRING TAG, device_id STRING TAG, temperature FLOAT FIELD ); +CREATE TABLE target_table ( time TIMESTAMP TIME, region STRING TAG, device_id STRING TAG, temperature FLOAT FIELD ); Msg: The statement is executed successfully. ``` @@ -205,9 +207,13 @@ Msg: The statement is executed successfully. 例如:使用标准查询语句,将 table1 中北京地区的 time, region, device\_id, temperature 数据查询写回到 target\_table 中 ```SQL -IoTDB:database1> insert into target_table select time,region,device_id,temperature from table1 where region = '北京' +insert into target_table select time,region,device_id,temperature from table1 where region = '北京'; Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region='北京' +``` +```sql +select * from target_table where region='北京'; +``` +```shell +-----------------------------+------+---------+-----------+ | time|region|device_id|temperature| +-----------------------------+------+---------+-----------+ @@ -232,9 +238,13 @@ It costs 0.029s 例如:使用表引用查询,将 table3 中的数据查询写回到 target\_table 中 ```SQL -IoTDB:database1> insert into target_table(time,device_id,temperature) table table3 +insert into target_table(time,device_id,temperature) table table3; Msg: The statement is executed successfully. 
-IoTDB:database1> select * from target_table where region is null +``` +```sql +select * from target_table where region is null; +``` +```shell +-----------------------------+------+---------+-----------+ | time|region|device_id|temperature| +-----------------------------+------+---------+-----------+ @@ -257,9 +267,13 @@ It costs 0.015s 例如:使用子查询,将 table1 中时间与 table2 上海地区记录匹配的数据的 time, region, device\_id, temperature 查询写回到 target\_table ```SQL -IoTDB:database1> insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = '上海')) +insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = '上海')); Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region = '上海' +``` +```sql +select * from target_table where region = '上海'; +``` +```shell +-----------------------------+------+---------+-----------+ | time|region|device_id|temperature| +-----------------------------+------+---------+-----------+ @@ -313,7 +327,7 @@ insert into tableName(time, columnName) values(timeValue, to_object(isEOF, offse 向表 table1 中增加 object 类型字段 s1 ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS s1 OBJECT FIELD COMMENT 'object类型' +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS s1 OBJECT FIELD COMMENT 'object类型'; ``` 1. 不分段写入 @@ -325,12 +339,12 @@ insert into table1(time, device_id, s1) values(now(), 'tag1', to_object(true, 0, 2. 
分段写入 ```SQL ---分段写入 object 数据 ---第一次写入:to_object(false, 0, X'696F') +--分段写入 object 数据; +--第一次写入:to_object(false, 0, X'696F'); insert into table1(time, device_id, s1) values(1, 'tag1', to_object(false, 0, X'696F')); ---第二次写入:to_object(false, 2, X'7464') +--第二次写入:to_object(false, 2, X'7464'); insert into table1(time, device_id, s1) values(1, 'tag1', to_object(false, 2, X'7464')); ---第三次写入:to_object(true, 4, X'62') +--第三次写入:to_object(true, 4, X'62'); insert into table1(time, device_id, s1) values(1, 'tag1', to_object(true, 4, X'62')); ``` @@ -364,5 +378,5 @@ updateAssignment **示例:** ```SQL -update table1 set b = a where substring(a, 1, 1) like '%' +update table1 set b = a where substring(a, 1, 1) like '%'; ``` \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md b/src/zh/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md index 64ede20a8..666e07dfc 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md @@ -110,7 +110,9 @@ try (ITableSession session = 在代码执行完成后,可以通过下述语句确认表已成功创建,其中包含了时间列、标签列、属性列以及测点列等各类信息。 ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +-----------+---------+-----------+ | ColumnName| DataType| Category| +-----------+---------+-----------+ @@ -131,9 +133,9 @@ IoTDB> desc table1 **示例:** ```SQL -INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('北京', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1) +INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('北京', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('北京', '1001', '100', '2025-11-26 13:38:00', 91.0) +INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('北京', '1001', '100', '2025-11-26 13:38:00', 91.0); ``` ### 1.4 空值写入 @@ -143,10 +145,10 @@ 
INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('北 **示例(与上述示例等价):** ```SQL -# 上述部分列写入等价于如下的带空值写入 -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1) +# 上述部分列写入等价于如下的带空值写入; +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null) +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null); ``` 当向不包含任何标签列的表中写入数据时,系统将默认创建一个所有标签列值均为 null 的device。 @@ -163,13 +165,13 @@ INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, tem INSERT INTO table1 VALUES ('2025-11-26 13:37:00', '北京', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('2025-11-26 13:38:00', '北京', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25') +('2025-11-26 13:38:00', '北京', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25'); INSERT INTO table1 (region, plant_id, device_id, model_id, maintenance, time, temperature, humidity, status, arrival_time) VALUES ('北京', '1001', '100', 'A', '180', '2025-11-26 13:37:00', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('北京', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25') +('北京', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25'); ``` #### 注意事项 @@ -205,9 +207,13 @@ Msg: The statement is executed successfully. 
例如:使用标准查询语句,将 table1 中北京地区的 time, region, device\_id, temperature 数据查询写回到 target\_table 中 ```SQL -IoTDB:database1> insert into target_table select time,region,device_id,temperature from table1 where region = '北京' +insert into target_table select time,region,device_id,temperature from table1 where region = '北京'; Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region='北京' +``` +```sql +select * from target_table where region='北京'; +``` +```shell +-----------------------------+------+---------+-----------+ | time|region|device_id|temperature| +-----------------------------+------+---------+-----------+ @@ -232,9 +238,13 @@ It costs 0.029s 例如:使用表引用查询,将 table3 中的数据查询写回到 target\_table 中 ```SQL -IoTDB:database1> insert into target_table(time,device_id,temperature) table table3 +insert into target_table(time,device_id,temperature) table table3; Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region is null +``` +```sql +select * from target_table where region is null; +``` +```shell +-----------------------------+------+---------+-----------+ | time|region|device_id|temperature| +-----------------------------+------+---------+-----------+ @@ -257,9 +267,13 @@ It costs 0.015s 例如:使用子查询,将 table1 中时间与 table2 上海地区记录匹配的数据的 time, region, device\_id, temperature 查询写回到 target\_table ```SQL -IoTDB:database1> insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = '上海')) +insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = '上海')); Msg: The statement is executed successfully. 
-IoTDB:database1> select * from target_table where region = '上海' +``` +```sql +select * from target_table where region = '上海'; +``` +```shell +-----------------------------+------+---------+-----------+ | time|region|device_id|temperature| +-----------------------------+------+---------+-----------+ @@ -314,7 +328,7 @@ insert into tableName(time, columnName) values(timeValue, to_object(isEOF, offse 向表 table1 中增加 object 类型字段 s1 ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS s1 OBJECT FIELD COMMENT 'object类型' +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS s1 OBJECT FIELD COMMENT 'object类型'; ``` 1. 不分段写入 @@ -326,12 +340,12 @@ insert into table1(time, device_id, s1) values(now(), 'tag1', to_object(true, 0, 2. 分段写入 ```SQL ---分段写入 object 数据 ---第一次写入:to_object(false, 0, X'696F') +--分段写入 object 数据; +--第一次写入:to_object(false, 0, X'696F'); insert into table1(time, device_id, s1) values(1, 'tag1', to_object(false, 0, X'696F')); ---第二次写入:to_object(false, 2, X'7464') +--第二次写入:to_object(false, 2, X'7464'); insert into table1(time, device_id, s1) values(1, 'tag1', to_object(false, 2, X'7464')); ---第三次写入:to_object(true, 4, X'62') +--第三次写入:to_object(true, 4, X'62'); insert into table1(time, device_id, s1) values(1, 'tag1', to_object(true, 4, X'62')); ``` @@ -365,5 +379,5 @@ updateAssignment **示例:** ```SQL -update table1 set b = a where substring(a, 1, 1) like '%' +update table1 set b = a where substring(a, 1, 1) like '%'; ``` diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_apache.md b/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_apache.md index 406f3af92..7fd5deafe 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_apache.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_apache.md @@ -39,7 +39,7 @@ CREATE DATABASE (IF NOT EXISTS)? (WITH properties)? 
CREATE DATABASE database1; CREATE DATABASE IF NOT EXISTS database1; --- 创建一个名为 database1 的数据库,并将数据库的TTL时间设置为1年。 +-- 创建一个名为 database1 的数据库,并将数据库的TTL时间设置为1年; CREATE DATABASE IF NOT EXISTS database1 with(TTL=31536000000); ``` @@ -54,7 +54,7 @@ USE **示例:** ```SQL -USE database1 +USE database1; ``` ### 1.3 查看当前数据库 @@ -62,22 +62,26 @@ USE database1 **语法:** ```SQL -SHOW CURRENT_DATABASE +SHOW CURRENT_DATABASE; ``` **示例:** ```SQL -IoTDB> SHOW CURRENT_DATABASE; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ | null| +---------------+ - -IoTDB> USE database1; - -IoTDB> SHOW CURRENT_DATABASE; +``` +```sql +USE database1; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ @@ -98,15 +102,20 @@ SHOW DATABASES (DETAILS)? **示例:** ```SQL -IoTDB> show databases +show databases; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval| +------------------+-------+-----------------------+---------------------+---------------------+ | database1| INF| 1| 1| 604800000| |information_schema| INF| null| null| null| +------------------+-------+-----------------------+---------------------+---------------------+ - -IoTDB> show databases details +``` +```sql +show databases details; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|DataRegionGroupNum| +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ @@ -120,7 +129,7 @@ IoTDB> show databases details **语法:** ```SQL -ALTER DATABASE (IF EXISTS)? database=identifier SET PROPERTIES propertyAssignments +ALTER DATABASE (IF EXISTS)? 
database=identifier SET PROPERTIES propertyAssignments; ``` **示例:** @@ -134,13 +143,13 @@ ALTER DATABASE database1 SET PROPERTIES TTL=31536000000; **语法:** ```SQL -DROP DATABASE (IF EXISTS)? +DROP DATABASE (IF EXISTS)? ; ``` **示例:** ```SQL -DROP DATABASE IF EXISTS database1 +DROP DATABASE IF EXISTS database1; ``` ## 2. 表管理 @@ -216,14 +225,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? **示例:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -242,7 +256,9 @@ IoTDB> show tables details from database1 **示例:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -257,8 +273,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -286,7 +305,9 @@ SHOW CREATE TABLE **示例:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| 
+------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -301,22 +322,25 @@ Total line number = 1 **语法:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment' #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment'; +#dropColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **示例:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 2.6 删除表 @@ -324,14 +348,14 @@ COMMENT ON COLUMN table1.a IS null **语法:** ```SQL -DROP TABLE (IF EXISTS)? 
+DROP TABLE (IF EXISTS)? ; ``` **示例:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md b/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md index 27e7c1b7a..9efc3a547 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md @@ -39,7 +39,7 @@ CREATE DATABASE (IF NOT EXISTS)? (WITH properties)? CREATE DATABASE database1; CREATE DATABASE IF NOT EXISTS database1; --- 创建一个名为 database1 的数据库,并将数据库的TTL时间设置为1年。 +-- 创建一个名为 database1 的数据库,并将数据库的TTL时间设置为1年; CREATE DATABASE IF NOT EXISTS database1 with(TTL=31536000000); ``` @@ -54,7 +54,7 @@ USE **示例:** ```SQL -USE database1 +USE database1; ``` ### 1.3 查看当前数据库 @@ -62,22 +62,26 @@ USE database1 **语法:** ```SQL -SHOW CURRENT_DATABASE +SHOW CURRENT_DATABASE; ``` **示例:** ```SQL -IoTDB> SHOW CURRENT_DATABASE; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ | null| +---------------+ - -IoTDB> USE database1; - -IoTDB> SHOW CURRENT_DATABASE; +``` +```sql +USE database1; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ @@ -98,15 +102,20 @@ SHOW DATABASES (DETAILS)? 
**示例:** ```SQL -IoTDB> show databases +show databases; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval| +------------------+-------+-----------------------+---------------------+---------------------+ | database1| INF| 1| 1| 604800000| |information_schema| INF| null| null| null| +------------------+-------+-----------------------+---------------------+---------------------+ - -IoTDB> show databases details +``` +```sql +show databases details; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|DataRegionGroupNum| +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ @@ -120,7 +129,7 @@ IoTDB> show databases details **语法:** ```SQL -ALTER DATABASE (IF EXISTS)? database=identifier SET PROPERTIES propertyAssignments +ALTER DATABASE (IF EXISTS)? database=identifier SET PROPERTIES propertyAssignments; ``` **示例:** @@ -134,13 +143,13 @@ ALTER DATABASE database1 SET PROPERTIES TTL=31536000000; **语法:** ```SQL -DROP DATABASE (IF EXISTS)? +DROP DATABASE (IF EXISTS)? ; ``` **示例:** ```SQL -DROP DATABASE IF EXISTS database1 +DROP DATABASE IF EXISTS database1; ``` ## 2. 表管理 @@ -216,14 +225,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? 
**示例:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -242,7 +256,9 @@ IoTDB> show tables details from database1 **示例:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -257,8 +273,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -286,7 +305,9 @@ SHOW CREATE TABLE **示例:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -301,22 +322,25 @@ Total line number = 1 **语法:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
column=columnDefinition COMMENT 'column_comment' #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment'; +#dropColumn; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **示例:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 2.6 删除表 @@ -330,8 +354,8 @@ DROP TABLE (IF EXISTS)? 
**示例:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` diff --git a/src/zh/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_apache.md b/src/zh/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_apache.md index d57a6d9bf..720669e64 100644 --- a/src/zh/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_apache.md +++ b/src/zh/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_apache.md @@ -29,16 +29,16 @@ 我们可以根据存储模型建立相应的数据库。如下所示: -``` -IoTDB > CREATE DATABASE root.ln +```sql +CREATE DATABASE root.ln; ``` 需要注意的是,推荐创建一个 database. Database 的父子节点都不能再设置 database。例如在已经有`root.ln`和`root.sgcc`这两个 database 的情况下,创建`root.ln.wf01` database 是不可行的。系统将给出相应的错误提示,如下所示: -``` -IoTDB> CREATE DATABASE root.ln.wf01 +```sql +CREATE DATABASE root.ln.wf01; Msg: 300: root.ln has already been created as database. ``` Database 节点名命名规则: @@ -47,7 +47,8 @@ Database 节点名命名规则: - 纯数字(如 12345) - 含有特殊字符(如 . 或 \_)并可能引发歧义的名称(如 db.01、\_temp) 3. 反引号的特殊处理: - 若节点名本身需要包含反引号(\`),则需用**两个反引号(\`\`)** 表示一个反引号。例如:命名为\`db123\`\`(本身包含一个反引号),需写为 \`db123\`\`\`。 +若节点名本身需要包含反引号(\`),则需用**两个反引号(\`\`)** 表示一个反引号。例如:命名为\`db123\`\`(本身包含一个反引号),需写为 \`db123\`\`\`。 + 还需注意,如果在 Windows 系统上部署,database 名是大小写不敏感的。例如同时创建`root.ln` 和 `root.LN` 是不被允许的。 @@ -55,15 +56,15 @@ Database 节点名命名规则: 在 database 创建后,我们可以使用 [SHOW DATABASES](../SQL-Manual/SQL-Manual.md#查看数据库) 语句和 [SHOW DATABASES \](../SQL-Manual/SQL-Manual.md#查看数据库) 来查看 database,SQL 语句如下所示: -``` -IoTDB> show databases -IoTDB> show databases root.* -IoTDB> show databases root.** +```sql +show databases; +show databases root.*; +show databases root.**; ``` 执行结果为: -``` +```shell +-------------+----+-------------------------+-----------------------+-----------------------+ | database| ttl|schema_replication_factor|data_replication_factor|time_partition_interval| +-------------+----+-------------------------+-----------------------+-----------------------+ @@ -78,11 +79,11 @@ It costs 0.060s 用户可以使用`DELETE DATABASE 
`语句删除该路径模式匹配的所有的数据库。在删除的过程中,需要注意的是数据库的数据也会被删除。 -``` -IoTDB > DELETE DATABASE root.ln -IoTDB > DELETE DATABASE root.sgcc -// 删除所有数据,时间序列以及数据库 -IoTDB > DELETE DATABASE root.** +```sql +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +// 删除所有数据,时间序列以及数据库; +DELETE DATABASE root.**; ``` ### 1.4 统计数据库数量 @@ -91,17 +92,17 @@ IoTDB > DELETE DATABASE root.** SQL 语句如下所示: -``` -IoTDB> show databases -IoTDB> count databases -IoTDB> count databases root.* -IoTDB> count databases root.sgcc.* -IoTDB> count databases root.sgcc +```sql +show databases; +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` 执行结果为: -``` +```shell +-------------+ | database| +-------------+ @@ -159,7 +160,7 @@ TTL的默认单位为毫秒,如果配置文件中的时间精度修改为其 #### TTL Path 规则 设置的路径 path 只支持前缀路径(即路径中间不能带 \* , 且必须以 \*\* 结尾),该路径会匹配到设备,也允许用户指定不带星的 path 为具体的 database 或 device,当 path 不带 \* 时,会检查是否匹配到 database,若匹配到 database,则会同时设置 path 和 path.\*\*。 注意:设备 TTL 设置不会对元数据的存在性进行校验,即允许对一条不存在的设备设置 TTL。 -``` +```shell 合格的 path: root.** root.db.** @@ -177,7 +178,7 @@ root.db.* #### 设置 TTL set ttl 操作可以理解为设置一条 TTL规则,比如 set ttl to root.sg.group1.\*\* 就相当于对所有可以匹配到该路径模式的设备挂载 ttl。 unset ttl 操作表示对相应路径模式卸载 TTL,若不存在对应 TTL,则不做任何事。若想把 TTL 调成无限大,则可以使用 INF 关键字 设置 TTL 的 SQL 语句如下所示: -``` +```sql set ttl to pathPattern 360000; ``` pathPattern 是前缀路径,即路径中间不能带 \* 且必须以 \*\* 结尾。 @@ -189,30 +190,30 @@ pathPattern 匹配对应的设备。为了兼容老版本 SQL 语法,允许用 取消 TTL 的 SQL 语句如下所示: -``` -IoTDB> unset ttl from root.ln +```sql +unset ttl from root.ln; ``` 取消设置 TTL 后, `root.ln` 路径下所有的数据都会被保存。 -``` -IoTDB> unset ttl from root.sgcc.** +```sql +unset ttl from root.sgcc.**; ``` 取消设置`root.sgcc`路径下的所有的 TTL 。 -``` -IoTDB> unset ttl from root.** +```sql +unset ttl from root.**; ``` 取消设置所有的 TTL 。 新语法 -``` -IoTDB> unset ttl from root.** +```sql +unset ttl from root.**; ``` 旧语法 -``` -IoTDB> unset ttl to root.** +```sql +unset ttl to root.**; ``` 新旧语法在功能上没有区别并且同时兼容,仅是新语法在用词上更符合常规。 #### 显示 TTL @@ -220,8 +221,10 @@ IoTDB> unset ttl to root.** 显示 
TTL 的 SQL 语句如下所示: show all ttl +```sql +SHOW ALL TTL; ``` -IoTDB> SHOW ALL TTL +```shell +--------------+--------+ | path| TTL| | root.**|55555555| @@ -229,9 +232,12 @@ IoTDB> SHOW ALL TTL +--------------+--------+ ``` -show ttl on pathPattern +show ttl on pathPattern; + +```sql +SHOW TTL ON root.db.**; ``` -IoTDB> SHOW TTL ON root.db.**; +```shell +--------------+--------+ | path| TTL| | root.db.**|55555555| @@ -242,8 +248,10 @@ SHOW ALL TTL 这个例子会给出所有的 TTL。 SHOW TTL ON pathPattern 这个例子会显示指定路径的 TTL。 显示设备的 TTL。 +```sql +show devices; ``` -IoTDB> show devices +```shell +---------------+---------+---------+ | Device|IsAligned| TTL| +---------------+---------+---------+ @@ -282,12 +290,12 @@ IoTDB> show devices 用户可以在创建 Database 时设置上述任意异构参数,SQL 语句如下所示: -``` +```sql CREATE DATABASE prefixPath (WITH databaseAttributeClause (COMMA? databaseAttributeClause)*)? ``` 例如: -``` +```sql CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTOR=3, SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -295,12 +303,12 @@ CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTO 用户可以在 IoTDB 运行时调整部分异构参数,SQL 语句如下所示: -``` +```sql ALTER DATABASE prefixPath WITH databaseAttributeClause (COMMA? databaseAttributeClause)* ``` 例如: -``` +```sql ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -312,14 +320,16 @@ ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; 用户可以查询每个 Database 的具体异构配置,SQL 语句如下所示: -``` +```sql SHOW DATABASES DETAILS prefixPath? 
``` 例如: +```sql +SHOW DATABASES DETAILS; ``` -IoTDB> SHOW DATABASES DETAILS +```shell +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ |Database| TTL|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|MinSchemaRegionGroupNum|MaxSchemaRegionGroupNum|DataRegionGroupNum|MinDataRegionGroupNum|MaxDataRegionGroupNum| +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ @@ -364,21 +374,21 @@ CREATE DEVICE TEMPLATE ALIGNED? '(' create device template t1 (temperature FLOAT, status BOOLEAN) +```sql +create device template t1 (temperature FLOAT, status BOOLEAN); ``` **示例2:** 创建包含一组对齐序列的元数据模板 -```shell -IoTDB> create device template t2 aligned (lat FLOAT, lon FLOAT) +```sql +create device template t2 aligned (lat FLOAT, lon FLOAT); ``` 其中,物理量 `lat` 和 `lon` 是对齐的。 创建模板时,系统会默认指定编码压缩方式,无需手动指定,若业务场景需要手动调整,可参考如下示例: -```shell -IoTDB> create device template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) +```sql +create device template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY); ``` 更多详细的数据类型与编码方式的对应列表请参见 [压缩&编码](../Technical-Insider/Encoding-and-Compression.md)。 @@ -394,8 +404,8 @@ IoTDB> create device template t1 (temperature FLOAT encoding=RLE, status BOOLEAN 挂载元数据模板的 SQL 语句如下所示: -```shell -IoTDB> set device template t1 to root.sg1.d1 +```sql +set device template t1 to root.sg1.d1; ``` ### 2.3 激活设备模板 @@ -404,21 +414,21 @@ IoTDB> set device template t1 to root.sg1.d1 **注意**:在插入数据之前或系统未开启自动注册序列功能,模板定义的时间序列不会被创建。可以使用如下SQL语句在插入数据前创建时间序列即激活模板: -```shell -IoTDB> create timeseries using device template on root.sg1.d1 +```sql +create 
timeseries using device template on root.sg1.d1; ``` **示例:** 执行以下语句 -```shell -IoTDB> set device template t1 to root.sg1.d1 -IoTDB> set device template t2 to root.sg1.d2 -IoTDB> create timeseries using device template on root.sg1.d1 -IoTDB> create timeseries using device template on root.sg1.d2 +```sql +set device template t1 to root.sg1.d1; +set device template t2 to root.sg1.d2; +create timeseries using device template on root.sg1.d1; +create timeseries using device template on root.sg1.d2; ``` 查看此时的时间序列: ```sql -show timeseries root.sg1.** +show timeseries root.sg1.**; ``` ```shell @@ -434,7 +444,7 @@ show timeseries root.sg1.** 查看此时的设备: ```sql -show devices root.sg1.** +show devices root.sg1.**; ``` ```shell @@ -452,8 +462,8 @@ show devices root.sg1.** SQL 语句如下所示: -```shell -IoTDB> show device templates +```sql +show device templates; ``` 执行结果如下: @@ -470,8 +480,8 @@ IoTDB> show device templates SQL 语句如下所示: -```shell -IoTDB> show nodes in device template t1 +```sql +show nodes in device template t1; ``` 执行结果如下: @@ -486,8 +496,8 @@ IoTDB> show nodes in device template t1 - 查看挂载了某个设备模板的路径 -```shell -IoTDB> show paths set device template t1 +```sql +show paths set device template t1; ``` 执行结果如下: @@ -501,8 +511,8 @@ IoTDB> show paths set device template t1 - 查看使用了某个设备模板的路径(即模板在该路径上已激活,序列已创建) -```shell -IoTDB> show paths using device template t1 +```sql +show paths using device template t1; ``` 执行结果如下: @@ -518,26 +528,26 @@ IoTDB> show paths using device template t1 若需删除模板表示的某一组时间序列,可采用解除模板操作,SQL语句如下所示: -```shell -IoTDB> delete timeseries of device template t1 from root.sg1.d1 +```sql +delete timeseries of device template t1 from root.sg1.d1; ``` 或 -```shell -IoTDB> deactivate device template t1 from root.sg1.d1 +```sql +deactivate device template t1 from root.sg1.d1; ``` 解除操作支持批量处理,SQL语句如下所示: -```shell -IoTDB> delete timeseries of device template t1 from root.sg1.*, root.sg2.* +```sql +delete timeseries of device template t1 from root.sg1.*, root.sg2.*; ``` 或 
-```shell -IoTDB> deactivate device template t1 from root.sg1.*, root.sg2.* +```sql +deactivate device template t1 from root.sg1.*, root.sg2.*; ``` 若解除命令不指定模板名称,则会将给定路径涉及的所有模板使用情况均解除。 @@ -546,8 +556,8 @@ IoTDB> deactivate device template t1 from root.sg1.*, root.sg2.* 卸载设备模板的 SQL 语句如下所示: -```shell -IoTDB> unset device template t1 from root.sg1.d1 +```sql +unset device template t1 from root.sg1.d1; ``` **注意**:不支持卸载仍处于激活状态的模板,需保证执行卸载操作前解除对该模板的所有使用,即删除所有该模板表示的序列。 @@ -556,8 +566,8 @@ IoTDB> unset device template t1 from root.sg1.d1 删除设备模板的 SQL 语句如下所示: -```shell -IoTDB> drop device template t1 +```sql +drop device template t1; ``` **注意**:不支持删除已经挂载的模板,需在删除操作前保证该模板卸载成功。 @@ -568,8 +578,8 @@ IoTDB> drop device template t1 修改设备模板的 SQL 语句如下所示: -```shell -IoTDB> alter device template t1 add (speed FLOAT) +```sql +alter device template t1 add (speed FLOAT); ``` **向已挂载模板的路径下的设备中写入数据,若写入请求中的物理量不在模板中,将自动扩展模板。** @@ -581,34 +591,34 @@ IoTDB> alter device template t1 add (speed FLOAT) 根据建立的数据模型,我们可以分别在两个数据库中创建相应的时间序列。创建时间序列的 SQL 语句如下所示: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +```sql +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` 从 v0.13 起,可以使用简化版的 SQL 语句创建时间序列: -``` -IoTDB > create timeseries 
root.ln.wf01.wt01.status BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT +```sql +create timeseries root.ln.wf01.wt01.status BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature FLOAT; +create timeseries root.ln.wf02.wt02.hardware TEXT; +create timeseries root.ln.wf02.wt02.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature FLOAT; ``` 创建时间序列时,系统会默认指定编码压缩方式,无需手动指定,若业务场景需要手动调整,可参考如下示例: -``` -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY +```sql +create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY; ``` 需要注意的是,如果手动指定了编码方式,但与数据类型不对应时,系统会给出相应的错误提示,如下所示: -``` -IoTDB> create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF +```sql +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF; error: encoding TS_2DIFF does not support BOOLEAN ``` @@ -618,8 +628,8 @@ error: encoding TS_2DIFF does not support BOOLEAN 创建一组对齐时间序列的SQL语句如下所示: -``` -IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) +```sql +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ``` 一组对齐序列中的序列可以有不同的数据类型、编码方式以及压缩方式。 @@ -630,11 +640,11 @@ IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOA 我们可以使用`(DELETE | DROP) TimeSeries `语句来删除我们之前创建的时间序列。SQL 语句如下所示: -``` -IoTDB> delete timeseries root.ln.wf01.wt01.status -IoTDB> delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -IoTDB> delete timeseries root.ln.wf02.* -IoTDB> drop timeseries root.ln.wf02.* +```sql +delete timeseries root.ln.wf01.wt01.status; +delete timeseries 
root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` ### 3.4 查看时间序列 @@ -655,14 +665,14 @@ IoTDB> drop timeseries root.ln.wf02.* 返回给定路径的下的所有时间序列信息。其中 `Path` 需要为一个时间序列路径或路径模式。例如,分别查看`root`路径和`root.ln`路径下的时间序列,SQL 语句如下所示: -``` -IoTDB> show timeseries root.** -IoTDB> show timeseries root.ln.** +```sql +show timeseries root.**; +show timeseries root.ln.**; ``` 执行结果分别为: -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -693,21 +703,21 @@ It costs 0.004s 只返回从指定下标开始的结果,最大返回条数被 LIMIT 限制,用于分页查询。例如: -``` -show timeseries root.ln.** limit 10 offset 10 +```sql +show timeseries root.ln.** limit 10 offset 10; ``` * SHOW TIMESERIES WHERE TIMESERIES contains 'containStr' 对查询结果集根据 timeseries 名称进行字符串模糊匹配过滤。例如: -``` -show timeseries root.ln.** where timeseries contains 'wf01.wt' +```sql +show timeseries root.ln.** where timeseries contains 'wf01.wt'; ``` 执行结果为: -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ 
@@ -722,13 +732,13 @@ It costs 0.016s 对查询结果集根据时间序列数据类型进行过滤。例如: -``` -show timeseries root.ln.** where dataType=FLOAT +```sql +show timeseries root.ln.** where dataType=FLOAT; ``` 执行结果为: -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -747,14 +757,14 @@ It costs 0.016s 对查询结果集根据标签进行过滤。例如: -``` -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` 执行结果分别为: -``` +```shell +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags|attributes|deadband|deadband parameters| +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ @@ -770,7 +780,6 @@ It costs 0.005s +------------------------+-----+-------------+--------+--------+-----------+-----------------------+----------+--------+-------------------+ Total line number = 1 It costs 0.004s - ``` * SHOW LATEST TIMESERIES @@ -789,21 +798,21 @@ IoTDB 支持使用`COUNT TIMESERIES`来统计一条路径中的时间序 * 可以通过 `WHERE` 条件对标签点进行过滤,语法为: `COUNT TIMESERIES WHERE TAGS(key)='value'` 或 `COUNT TIMESERIES WHERE TAGS(key) contains 'value'`。 * 可以通过定义`LEVEL`来统计指定层级下的时间序列个数。这条语句可以用来统计每一个设备下的传感器数量,语法为:`COUNT TIMESERIES GROUP BY LEVEL=`。 -``` -IoTDB > 
COUNT TIMESERIES root.** -IoTDB > COUNT TIMESERIES root.ln.** -IoTDB > COUNT TIMESERIES root.ln.*.*.status -IoTDB > COUNT TIMESERIES root.ln.wf01.wt01.status -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -IoTDB > COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 +```sql +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc'; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' ; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' ; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; ``` 例如有如下时间序列(可以使用`show timeseries`展示所有时间序列): -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -825,16 +834,20 @@ It costs 0.004s 可以看到,`root`被定义为`LEVEL=0`。那么当你输入如下语句时: -``` -IoTDB > COUNT TIMESERIES root.** GROUP BY LEVEL=1 -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +```sql +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` 你将得到以下结果: +```sql +COUNT TIMESERIES root.** GROUP BY LEVEL=1; 
+COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` -IoTDB> COUNT TIMESERIES root.** GROUP BY LEVEL=1 +```shell +------------+-----------------+ | column|count(timeseries)| +------------+-----------------+ @@ -845,7 +858,6 @@ IoTDB> COUNT TIMESERIES root.** GROUP BY LEVEL=1 Total line number = 3 It costs 0.002s -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 +------------+-----------------+ | column|count(timeseries)| +------------+-----------------+ @@ -855,7 +867,6 @@ IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 Total line number = 2 It costs 0.002s -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +------------+-----------------+ | column|count(timeseries)| +------------+-----------------+ @@ -877,8 +888,8 @@ It costs 0.002s * 属性只能用时间序列路径来查询:时间序列路径 -> 属性 所用到的扩展的创建时间序列的 SQL 语句如下所示: -``` -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +```sql +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` 括号里的`temprature`是`s1`这个传感器的别名。 @@ -891,48 +902,48 @@ create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v * 标签点属性更新 创建时间序列后,我们也可以对其原有的标签点属性进行更新,主要有以下六种更新方式: * 重命名标签或属性 -``` -ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +```sql +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1; ``` * 重新设置标签或属性的值 -``` -ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +```sql +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1; ``` * 删除已经存在的标签或属性 -``` -ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +```sql +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2; ``` * 添加新的标签 -``` -ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +```sql +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4; ``` * 添加新的属性 -``` -ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES 
attr3=v3, attr4=v4 +```sql +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4; ``` * 更新插入别名,标签和属性 > 如果该别名,标签或属性原来不存在,则插入,否则,用新值更新原来的旧值 -``` -ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4) +```sql +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4); ``` * 使用标签作为过滤条件查询时间序列,使用 TAGS(tagKey) 来标识作为过滤条件的标签 -``` +```sql SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause ``` 返回给定路径的下的所有满足条件的时间序列信息,SQL 语句如下所示: -``` -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` 执行结果分别为: -``` +```shell +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags|attributes|deadband|deadband parameters| +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ @@ -952,23 +963,22 @@ It costs 0.004s - 使用标签作为过滤条件统计时间序列数量 -``` -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +```sql +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? 
timeseriesWhereClause GROUP BY LEVEL=; ``` 返回给定路径的下的所有满足条件的时间序列的数量,SQL 语句如下所示: -``` -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +```sql +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` 执行结果分别为: -``` -IoTDB> count timeseries +```shell +-----------------+ |count(timeseries)| +-----------------+ @@ -976,7 +986,7 @@ IoTDB> count timeseries +-----------------+ Total line number = 1 It costs 0.019s -IoTDB> count timeseries root.** where TAGS(unit)='c' + +-----------------+ |count(timeseries)| +-----------------+ @@ -984,7 +994,7 @@ IoTDB> count timeseries root.** where TAGS(unit)='c' +-----------------+ Total line number = 1 It costs 0.020s -IoTDB> count timeseries root.** where TAGS(unit)='c' group by level = 2 + +--------------+-----------------+ | column|count(timeseries)| +--------------+-----------------+ @@ -1000,14 +1010,16 @@ It costs 0.011s 创建对齐时间序列 -``` -create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) +```sql +create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)); ``` 执行结果如下: +```sql +show timeseries; ``` -IoTDB> show timeseries +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -1018,8 +1030,10 @@ IoTDB> show timeseries 支持查询: +```sql +show timeseries where TAGS(tag1)='v1' ``` -IoTDB> show 
timeseries where TAGS(tag1)='v1' +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -1084,7 +1098,7 @@ IoTDB> show timeseries where TAGS(tag1)='v1' ### 4.5 查看路径的所有子路径 -``` +```sql SHOW CHILD PATHS pathPattern ``` @@ -1096,7 +1110,7 @@ SHOW CHILD PATHS pathPattern * 查询 root.ln 的下一层:show child paths root.ln -``` +```shell +------------+----------+ | child paths|node types| +------------+----------+ @@ -1109,7 +1123,7 @@ It costs 0.002s * 查询形如 root.xx.xx.xx 的路径:show child paths root.\*.\* -``` +```shell +---------------+ | child paths| +---------------+ @@ -1120,8 +1134,8 @@ It costs 0.002s ### 4.6 查看路径的下一级节点 -``` -SHOW CHILD NODES pathPattern +```sql +SHOW CHILD NODES pathPattern; ``` 可以查看此路径模式所匹配的节点的下一层的所有节点。 @@ -1130,7 +1144,7 @@ SHOW CHILD NODES pathPattern * 查询 root 的下一层:show child nodes root -``` +```shell +------------+ | child nodes| +------------+ @@ -1140,7 +1154,7 @@ SHOW CHILD NODES pathPattern * 查询 root.ln 的下一层 :show child nodes root.ln -``` +```shell +------------+ | child nodes| +------------+ @@ -1154,16 +1168,16 @@ SHOW CHILD NODES pathPattern IoTDB 支持使用`COUNT NODES LEVEL=`来统计当前 Metadata 树下满足某路径模式的路径中指定层级的节点个数。这条语句可以用来统计带有特定采样点的设备数。例如: -``` -IoTDB > COUNT NODES root.** LEVEL=2 -IoTDB > COUNT NODES root.ln.** LEVEL=2 -IoTDB > COUNT NODES root.ln.wf01.* LEVEL=3 -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +```sql +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.* LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` 对于上面提到的例子和 Metadata Tree,你可以获得如下结果: -``` +```shell +------------+ |count(nodes)| +------------+ @@ -1213,19 +1227,19 @@ It costs 
0.002s SQL 语句如下所示: -``` -IoTDB> show devices -IoTDB> show devices root.ln.** -IoTDB> show devices root.ln.** where device contains 't' -IoTDB> show devices root.ln.** where template = 't1' -IoTDB> show devices root.ln.** where template is null -IoTDB> show devices root.ln.** where template != 't1' -IoTDB> show devices root.ln.** where template is not null +```sql +show devices; +show devices root.ln.**; +show devices root.ln.** where device contains 't'; +show devices root.ln.** where template = 't1'; +show devices root.ln.** where template is null; +show devices root.ln.** where template != 't1'; +show devices root.ln.** where template is not null; ``` 你可以获得如下数据: -``` +```shell +-------------------+---------+---------+ | devices|isAligned| Template| +-------------------+---------+---------+ @@ -1282,14 +1296,14 @@ It costs 0.001s SQL 语句如下所示: -``` -IoTDB> show devices with database -IoTDB> show devices root.ln.** with database +```sql +show devices with database; +show devices root.ln.** with database; ``` 你可以获得如下数据: -``` +```shell +-------------------+-------------+---------+---------+ | devices| database|isAligned| Template| +-------------------+-------------+---------+---------+ @@ -1319,15 +1333,15 @@ It costs 0.001s SQL 语句如下所示: -``` -IoTDB> show devices -IoTDB> count devices -IoTDB> count devices root.ln.** +```sql +show devices; +count devices; +count devices root.ln.**; ``` 你可以获得如下数据: -``` +```shell +-------------------+---------+---------+ | devices|isAligned| Template| +-------------------+---------+---------+ diff --git a/src/zh/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md b/src/zh/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md index 529aab548..75e6abc26 100644 --- a/src/zh/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md +++ b/src/zh/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md @@ -29,16 +29,16 @@ 我们可以根据存储模型建立相应的数据库。如下所示: -``` -IoTDB > CREATE DATABASE root.ln +```sql +CREATE DATABASE 
root.ln; ``` 需要注意的是,推荐创建一个 database. Database 的父子节点都不能再设置 database。例如在已经有`root.ln`和`root.sgcc`这两个 database 的情况下,创建`root.ln.wf01` database 是不可行的。系统将给出相应的错误提示,如下所示: -``` -IoTDB> CREATE DATABASE root.ln.wf01 +```sql +CREATE DATABASE root.ln.wf01; Msg: 300: root.ln has already been created as database. ``` Database 节点名命名规则: @@ -55,15 +55,15 @@ Database 节点名命名规则: 在 database 创建后,我们可以使用 [SHOW DATABASES](../SQL-Manual/SQL-Manual.md#查看数据库) 语句和 [SHOW DATABASES \](../SQL-Manual/SQL-Manual.md#查看数据库) 来查看 database,SQL 语句如下所示: -``` -IoTDB> show databases -IoTDB> show databases root.* -IoTDB> show databases root.** +```sql +show databases; +show databases root.*; +show databases root.**; ``` 执行结果为: -``` +```shell +-------------+----+-------------------------+-----------------------+-----------------------+ | database| ttl|schema_replication_factor|data_replication_factor|time_partition_interval| +-------------+----+-------------------------+-----------------------+-----------------------+ @@ -78,11 +78,11 @@ It costs 0.060s 用户可以使用`DELETE DATABASE `语句删除该路径模式匹配的所有的数据库。在删除的过程中,需要注意的是数据库的数据也会被删除。 -``` -IoTDB > DELETE DATABASE root.ln -IoTDB > DELETE DATABASE root.sgcc -// 删除所有数据,时间序列以及数据库 -IoTDB > DELETE DATABASE root.** +```sql +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +// 删除所有数据,时间序列以及数据库 +DELETE DATABASE root.**; ``` ### 1.4 统计数据库数量 @@ -91,17 +91,17 @@ IoTDB > DELETE DATABASE root.** SQL 语句如下所示: -``` -IoTDB> show databases -IoTDB> count databases -IoTDB> count databases root.* -IoTDB> count databases root.sgcc.* -IoTDB> count databases root.sgcc +```sql +show databases; +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` 执行结果为: -``` +```shell +-------------+ | database| +-------------+ @@ -159,7 +159,7 @@ TTL的默认单位为毫秒,如果配置文件中的时间精度修改为其 #### TTL Path 规则 设置的路径 path 只支持前缀路径(即路径中间不能带 \* , 且必须以 \*\* 结尾),该路径会匹配到设备,也允许用户指定不带星的 path 为具体的 database 或 device,当 path 不带 \* 时,会检查是否匹配到 database,若匹配到 database,则会同时设置 path 和 path.\*\*。 
注意:设备 TTL 设置不会对元数据的存在性进行校验,即允许对一条不存在的设备设置 TTL。 -``` +```shell 合格的 path: root.** root.db.** @@ -177,7 +177,7 @@ root.db.* #### 设置 TTL set ttl 操作可以理解为设置一条 TTL规则,比如 set ttl to root.sg.group1.\*\* 就相当于对所有可以匹配到该路径模式的设备挂载 ttl。 unset ttl 操作表示对相应路径模式卸载 TTL,若不存在对应 TTL,则不做任何事。若想把 TTL 调成无限大,则可以使用 INF 关键字 设置 TTL 的 SQL 语句如下所示: -``` +```sql set ttl to pathPattern 360000; ``` pathPattern 是前缀路径,即路径中间不能带 \* 且必须以 \*\* 结尾。 @@ -189,30 +189,30 @@ pathPattern 匹配对应的设备。为了兼容老版本 SQL 语法,允许用 取消 TTL 的 SQL 语句如下所示: -``` -IoTDB> unset ttl from root.ln +```sql +unset ttl from root.ln; ``` 取消设置 TTL 后, `root.ln` 路径下所有的数据都会被保存。 -``` -IoTDB> unset ttl from root.sgcc.** +```sql +unset ttl from root.sgcc.**; ``` 取消设置`root.sgcc`路径下的所有的 TTL 。 -``` -IoTDB> unset ttl from root.** +```sql +unset ttl from root.**; ``` 取消设置所有的 TTL 。 新语法 -``` -IoTDB> unset ttl from root.** +```sql +unset ttl from root.**; ``` 旧语法 -``` -IoTDB> unset ttl to root.** +```sql +unset ttl to root.**; ``` 新旧语法在功能上没有区别并且同时兼容,仅是新语法在用词上更符合常规。 #### 显示 TTL @@ -220,8 +220,10 @@ IoTDB> unset ttl to root.** 显示 TTL 的 SQL 语句如下所示: show all ttl +```sql +SHOW ALL TTL; ``` -IoTDB> SHOW ALL TTL +```shell +--------------+--------+ | path| TTL| | root.**|55555555| @@ -230,8 +232,10 @@ IoTDB> SHOW ALL TTL ``` show ttl on pathPattern +```sql +SHOW TTL ON root.db.**; ``` -IoTDB> SHOW TTL ON root.db.**; +```shell +--------------+--------+ | path| TTL| | root.db.**|55555555| @@ -242,8 +246,10 @@ SHOW ALL TTL 这个例子会给出所有的 TTL。 SHOW TTL ON pathPattern 这个例子会显示指定路径的 TTL。 显示设备的 TTL。 +```sql +show devices; ``` -IoTDB> show devices +```shell +---------------+---------+---------+ | Device|IsAligned| TTL| +---------------+---------+---------+ @@ -282,12 +288,12 @@ IoTDB> show devices 用户可以在创建 Database 时设置上述任意异构参数,SQL 语句如下所示: -``` +```sql CREATE DATABASE prefixPath (WITH databaseAttributeClause (COMMA? databaseAttributeClause)*)? 
``` 例如: -``` +```sql CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTOR=3, SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -295,12 +301,12 @@ CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTO 用户可以在 IoTDB 运行时调整部分异构参数,SQL 语句如下所示: -``` +```sql ALTER DATABASE prefixPath WITH databaseAttributeClause (COMMA? databaseAttributeClause)* ``` 例如: -``` +```sql ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -312,14 +318,16 @@ ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; 用户可以查询每个 Database 的具体异构配置,SQL 语句如下所示: -``` +```sql SHOW DATABASES DETAILS prefixPath? ``` 例如: +```sql +SHOW DATABASES DETAILS; ``` -IoTDB> SHOW DATABASES DETAILS +```shell +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ |Database| TTL|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|MinSchemaRegionGroupNum|MaxSchemaRegionGroupNum|DataRegionGroupNum|MinDataRegionGroupNum|MaxDataRegionGroupNum| +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ @@ -351,34 +359,34 @@ It costs 0.058s 根据建立的数据模型,我们可以分别在两个数据库中创建相应的时间序列。创建时间序列的 SQL 语句如下所示: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +```sql 
+create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` 从 v0.13 起,可以使用简化版的 SQL 语句创建时间序列: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT +```sql +create timeseries root.ln.wf01.wt01.status BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature FLOAT; +create timeseries root.ln.wf02.wt02.hardware TEXT; +create timeseries root.ln.wf02.wt02.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature FLOAT; ``` 创建时间序列时,系统会默认指定编码压缩方式,无需手动指定,若业务场景需要手动调整,可参考如下示例: -``` -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY +```sql +create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY; ``` 需要注意的是,如果手动指定了编码方式,但与数据类型不对应时,系统会给出相应的错误提示,如下所示: -``` -IoTDB> create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF +```sql +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF; error: encoding TS_2DIFF does not support BOOLEAN ``` @@ -389,8 +397,8 @@ error: encoding TS_2DIFF does not support BOOLEAN 创建一组对齐时间序列的SQL语句如下所示: -``` -IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) +```sql +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ``` 
一组对齐序列中的序列可以有不同的数据类型、编码方式以及压缩方式。 @@ -401,11 +409,11 @@ IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOA 我们可以使用`(DELETE | DROP) TimeSeries `语句来删除我们之前创建的时间序列。SQL 语句如下所示: -``` -IoTDB> delete timeseries root.ln.wf01.wt01.status -IoTDB> delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -IoTDB> delete timeseries root.ln.wf02.* -IoTDB> drop timeseries root.ln.wf02.* +```sql +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` ### 2.4 查看时间序列 @@ -426,14 +434,14 @@ IoTDB> drop timeseries root.ln.wf02.* 返回给定路径的下的所有时间序列信息。其中 `Path` 需要为一个时间序列路径或路径模式。例如,分别查看`root`路径和`root.ln`路径下的时间序列,SQL 语句如下所示: -``` -IoTDB> show timeseries root.** -IoTDB> show timeseries root.ln.** +```sql +show timeseries root.**; +show timeseries root.ln.**; ``` 执行结果分别为: -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -464,21 +472,21 @@ It costs 0.004s 只返回从指定下标开始的结果,最大返回条数被 LIMIT 限制,用于分页查询。例如: -``` -show timeseries root.ln.** limit 10 offset 10 +```sql +show timeseries root.ln.** limit 10 offset 10; ``` * SHOW TIMESERIES WHERE TIMESERIES contains 'containStr' 对查询结果集根据 timeseries 名称进行字符串模糊匹配过滤。例如: -``` -show timeseries root.ln.** where timeseries contains 'wf01.wt' +```sql +show timeseries root.ln.** where timeseries contains 'wf01.wt'; ``` 执行结果为: -``` +```shell 
+-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -493,13 +501,13 @@ It costs 0.016s 对查询结果集根据时间序列数据类型进行过滤。例如: -``` -show timeseries root.ln.** where dataType=FLOAT +```sql +show timeseries root.ln.** where dataType=FLOAT; ``` 执行结果为: -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -517,14 +525,14 @@ It costs 0.016s 对查询结果集根据标签进行过滤。例如: -``` -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` 执行结果分别为: -``` +```shell +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags|attributes|deadband|deadband parameters| +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ @@ 
-561,21 +569,21 @@ IoTDB 支持使用`COUNT TIMESERIES`来统计一条路径中的时间序 * 可以通过 `WHERE` 条件对标签点进行过滤,语法为: `COUNT TIMESERIES WHERE TAGS(key)='value'` 或 `COUNT TIMESERIES WHERE TAGS(key) contains 'value'`。 * 可以通过定义`LEVEL`来统计指定层级下的时间序列个数。这条语句可以用来统计每一个设备下的传感器数量,语法为:`COUNT TIMESERIES GROUP BY LEVEL=`。 -``` -IoTDB > COUNT TIMESERIES root.** -IoTDB > COUNT TIMESERIES root.ln.** -IoTDB > COUNT TIMESERIES root.ln.*.*.status -IoTDB > COUNT TIMESERIES root.ln.wf01.wt01.status -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -IoTDB > COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 +```sql +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' ; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' ; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' ; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; ``` 例如有如下时间序列(可以使用`show timeseries`展示所有时间序列): -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -597,16 +605,20 @@ It costs 0.004s 可以看到,`root`被定义为`LEVEL=0`。那么当你输入如下语句时: -``` -IoTDB > COUNT TIMESERIES root.** GROUP BY LEVEL=1 -IoTDB > COUNT TIMESERIES 
root.ln.** GROUP BY LEVEL=2 -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +```sql +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` 你将得到以下结果: +```sql +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` -IoTDB> COUNT TIMESERIES root.** GROUP BY LEVEL=1 +```shell +------------+-----------------+ | column|count(timeseries)| +------------+-----------------+ @@ -617,7 +629,6 @@ IoTDB> COUNT TIMESERIES root.** GROUP BY LEVEL=1 Total line number = 3 It costs 0.002s -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 +------------+-----------------+ | column|count(timeseries)| +------------+-----------------+ @@ -627,7 +638,6 @@ IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 Total line number = 2 It costs 0.002s -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +------------+-----------------+ | column|count(timeseries)| +------------+-----------------+ @@ -645,11 +655,15 @@ It costs 0.002s 需要注意的是, 在带有时间过滤的元数据查询中并不考虑视图的存在,只考虑TsFile中实际存储的时间序列。 一个使用样例如下: -``` -IoTDB> insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); -IoTDB> insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); -IoTDB> insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); -IoTDB> show timeseries; +```sql +insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); +insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); +insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); +show timeseries; +show timeseries where time >= 15000 and time < 16000; +count timeseries where time >= 15000 and time < 16000; +``` +```shell +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ | 
Timeseries|Alias|Database|DataType|Encoding|Compression|Tags|Attributes|Deadband|DeadbandParameters|ViewType| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ @@ -661,7 +675,6 @@ IoTDB> show timeseries; |root.sg.data2.s2| null| root.sg| FLOAT| GORILLA| LZ4|null| null| null| null| BASE| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ -IoTDB> show timeseries where time >= 15000 and time < 16000; +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ | Timeseries|Alias|Database|DataType|Encoding|Compression|Tags|Attributes|Deadband|DeadbandParameters|ViewType| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ @@ -671,7 +684,6 @@ IoTDB> show timeseries where time >= 15000 and time < 16000; |root.sg.data2.s2| null| root.sg| FLOAT| GORILLA| LZ4|null| null| null| null| BASE| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ -IoTDB> count timeseries where time >= 15000 and time < 16000; +-----------------+ |count(timeseries)| +-----------------+ @@ -690,8 +702,8 @@ IoTDB> count timeseries where time >= 15000 and time < 16000; * 属性只能用时间序列路径来查询:时间序列路径 -> 属性 所用到的扩展的创建时间序列的 SQL 语句如下所示: -``` -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT, encoding=RLE, compression=SNAPPY tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +```sql +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT, encoding=RLE, compression=SNAPPY tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` 括号里的`temprature`是`s1`这个传感器的别名。 @@ -704,48 +716,48 @@ create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT, encoding=R * 标签点属性更新 创建时间序列后,我们也可以对其原有的标签点属性进行更新,主要有以下六种更新方式: * 重命名标签或属性 -``` -ALTER timeseries root.turbine.d1.s1 
RENAME tag1 TO newTag1 +```sql +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1; ``` * 重新设置标签或属性的值 -``` -ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +```sql +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1; ``` * 删除已经存在的标签或属性 -``` -ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +```sql +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2; ``` * 添加新的标签 -``` -ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +```sql +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4; ``` * 添加新的属性 -``` -ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +```sql +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4; ``` * 更新插入别名,标签和属性 > 如果该别名,标签或属性原来不存在,则插入,否则,用新值更新原来的旧值 -``` -ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4) +```sql +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4); ``` * 使用标签作为过滤条件查询时间序列,使用 TAGS(tagKey) 来标识作为过滤条件的标签 -``` +```sql SHOW TIMESERIES (<`PathPattern`>)? 
timeseriesWhereClause ``` 返回给定路径的下的所有满足条件的时间序列信息,SQL 语句如下所示: -``` -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` 执行结果分别为: -``` +```shell +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags|attributes|deadband|deadband parameters| +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ @@ -765,23 +777,22 @@ It costs 0.004s - 使用标签作为过滤条件统计时间序列数量 -``` -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +```sql +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? 
timeseriesWhereClause GROUP BY LEVEL=; ``` 返回给定路径的下的所有满足条件的时间序列的数量,SQL 语句如下所示: -``` -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +```sql +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` 执行结果分别为: -``` -IoTDB> count timeseries +```shell +-----------------+ |count(timeseries)| +-----------------+ @@ -789,7 +800,7 @@ IoTDB> count timeseries +-----------------+ Total line number = 1 It costs 0.019s -IoTDB> count timeseries root.** where TAGS(unit)='c' + +-----------------+ |count(timeseries)| +-----------------+ @@ -797,7 +808,7 @@ IoTDB> count timeseries root.** where TAGS(unit)='c' +-----------------+ Total line number = 1 It costs 0.020s -IoTDB> count timeseries root.** where TAGS(unit)='c' group by level = 2 + +--------------+-----------------+ | column|count(timeseries)| +--------------+-----------------+ @@ -813,14 +824,16 @@ It costs 0.011s 创建对齐时间序列 -``` -create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) +```sql +create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)); ``` 执行结果如下: +```sql +show timeseries; ``` -IoTDB> show timeseries +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -831,8 +844,10 @@ IoTDB> show timeseries 支持查询: +```sql +show timeseries where TAGS(tag1)='v1'; ``` -IoTDB> show timeseries 
where TAGS(tag1)='v1' +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -897,8 +912,8 @@ IoTDB> show timeseries where TAGS(tag1)='v1' ### 3.5 查看路径的所有子路径 -``` -SHOW CHILD PATHS pathPattern +```sql +SHOW CHILD PATHS pathPattern; ``` 可以查看此路径模式所匹配的所有路径的下一层的所有路径和它对应的节点类型,即pathPattern.*所匹配的路径及其节点类型。 @@ -909,7 +924,7 @@ SHOW CHILD PATHS pathPattern * 查询 root.ln 的下一层:show child paths root.ln -``` +```shell +------------+----------+ | child paths|node types| +------------+----------+ @@ -922,7 +937,7 @@ It costs 0.002s * 查询形如 root.xx.xx.xx 的路径:show child paths root.\*.\* -``` +```shell +---------------+ | child paths| +---------------+ @@ -933,8 +948,8 @@ It costs 0.002s ### 3.6 查看路径的下一级节点 -``` -SHOW CHILD NODES pathPattern +```sql +SHOW CHILD NODES pathPattern; ``` 可以查看此路径模式所匹配的节点的下一层的所有节点。 @@ -943,7 +958,7 @@ SHOW CHILD NODES pathPattern * 查询 root 的下一层:show child nodes root -``` +```shell +------------+ | child nodes| +------------+ @@ -953,7 +968,7 @@ SHOW CHILD NODES pathPattern * 查询 root.ln 的下一层 :show child nodes root.ln -``` +```shell +------------+ | child nodes| +------------+ @@ -967,16 +982,16 @@ SHOW CHILD NODES pathPattern IoTDB 支持使用`COUNT NODES LEVEL=`来统计当前 Metadata 树下满足某路径模式的路径中指定层级的节点个数。这条语句可以用来统计带有特定采样点的设备数。例如: -``` -IoTDB > COUNT NODES root.** LEVEL=2 -IoTDB > COUNT NODES root.ln.** LEVEL=2 -IoTDB > COUNT NODES root.ln.wf01.* LEVEL=3 -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +```sql +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.* LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` 对于上面提到的例子和 Metadata Tree,你可以获得如下结果: -``` +```shell 
+------------+ |count(nodes)| +------------+ @@ -1024,15 +1039,15 @@ It costs 0.002s SQL 语句如下所示: -``` -IoTDB> show devices -IoTDB> show devices root.ln.** -IoTDB> show devices root.ln.** where device contains 't' +```sql +show devices; +show devices root.ln.**; +show devices root.ln.** where device contains 't'; ``` 你可以获得如下数据: -``` +```shell +-------------------+---------+---------+ | devices|isAligned| Template| +-------------------+---------+---------+ @@ -1074,14 +1089,14 @@ It costs 0.001s SQL 语句如下所示: -``` -IoTDB> show devices with database -IoTDB> show devices root.ln.** with database +```sql +show devices with database; +show devices root.ln.** with database; ``` 你可以获得如下数据: -``` +```shell +-------------------+-------------+---------+---------+ | devices| database|isAligned| Template| +-------------------+-------------+---------+---------+ @@ -1111,15 +1126,15 @@ It costs 0.001s SQL 语句如下所示: -``` -IoTDB> show devices -IoTDB> count devices -IoTDB> count devices root.ln.** +```sql +show devices; +count devices; +count devices root.ln.**; ``` 你可以获得如下数据: -``` +```shell +-------------------+---------+---------+ | devices|isAligned| Template| +-------------------+---------+---------+ @@ -1150,11 +1165,15 @@ It costs 0.004s ### 3.10 活跃设备查询 和活跃时间序列一样,我们可以在查看和统计设备的基础上添加时间过滤条件来查询在某段时间内存在数据的活跃设备。这里活跃的定义与活跃时间序列相同,使用样例如下: -``` -IoTDB> insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); -IoTDB> insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); -IoTDB> insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); -IoTDB> show devices; +```sql +insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); +insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); +insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); +show devices; +show devices where time >= 15000 and time < 16000; +count devices where time >= 15000 and time < 16000; +``` +```shell +-------------------+---------+ | devices|isAligned| 
+-------------------+---------+ @@ -1163,7 +1182,6 @@ IoTDB> show devices; | root.sg.data3| false| +-------------------+---------+ -IoTDB> show devices where time >= 15000 and time < 16000; +-------------------+---------+ | devices|isAligned| +-------------------+---------+ @@ -1171,7 +1189,6 @@ IoTDB> show devices where time >= 15000 and time < 16000; | root.sg.data2| false| +-------------------+---------+ -IoTDB> count devices where time >= 15000 and time < 16000; +--------------+ |count(devices)| +--------------+ diff --git a/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md b/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md index 213d04f8e..aa5134863 100644 --- a/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md +++ b/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md @@ -117,7 +117,7 @@ IoTDB 支持即席(Ad_hoc)查询,即支持用户在使用系统时,自定义 SQL 语句为: ```sql -select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 +select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000; ``` 其含义为: @@ -126,7 +126,7 @@ select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.temperature| +-----------------------------+-----------------------------+ @@ -157,7 +157,7 @@ select status, temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05: 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -188,7 +188,7 @@ select status, temperature from root.ln.wf01.wt01 where (time > 2017-11-01T00:05 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | 
Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -220,7 +220,7 @@ select wf01.wt01.status, wf02.wt02.hardware from root.ln where (time > 2017-11-0 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+------------------------+--------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf02.wt02.hardware| +-----------------------------+------------------------+--------------------------+ @@ -248,7 +248,7 @@ select * from root.ln.** where time > 1 order by time desc limit 10; 语句执行的结果为: -``` +```shell +-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ | Time|root.ln.wf02.wt02.hardware|root.ln.wf02.wt02.status|root.ln.wf01.wt01.temperature|root.ln.wf01.wt01.status| +-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ @@ -428,7 +428,7 @@ from root.sg1; 运行结果: -``` +```shell +-----------------------------+----------+----------+----------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Time|root.sg1.a|root.sg1.b|((((root.sg1.a + 1) * 2) - 1) % 2) + 1.5|sin(root.sg1.a + sin(root.sg1.a + sin(root.sg1.b)))|(-root.sg1.a + root.sg1.b * ((sin(root.sg1.a + root.sg1.b) * sin(root.sg1.a + root.sg1.b)) + (cos(root.sg1.a + root.sg1.b) * cos(root.sg1.a + root.sg1.b)))) + 1| +-----------------------------+----------+----------+----------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -446,12 +446,12 @@ It 
costs 0.048s **示例 2:** ```sql -select (a + b) * 2 + sin(a) from root.sg +select (a + b) * 2 + sin(a) from root.sg; ``` 运行结果: -``` +```shell +-----------------------------+----------------------------------------------+ | Time|((root.sg.a + root.sg.b) * 2) + sin(root.sg.a)| +-----------------------------+----------------------------------------------+ @@ -472,12 +472,12 @@ It costs 0.011s **示例 3:** ```sql -select (a + *) / 2 from root.sg1 +select (a + *) / 2 from root.sg1; ``` 运行结果: -``` +```shell +-----------------------------+-----------------------------+-----------------------------+ | Time|(root.sg1.a + root.sg1.a) / 2|(root.sg1.a + root.sg1.b) / 2| +-----------------------------+-----------------------------+-----------------------------+ @@ -494,12 +494,12 @@ It costs 0.011s **示例 4:** ```sql -select (a + b) * 3 from root.sg, root.ln +select (a + b) * 3 from root.sg, root.ln; ``` 运行结果: -``` +```shell +-----------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ | Time|(root.sg.a + root.sg.b) * 3|(root.sg.a + root.ln.b) * 3|(root.ln.a + root.sg.b) * 3|(root.ln.a + root.ln.b) * 3| +-----------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ @@ -538,7 +538,7 @@ from root.ln.wf01.wt01; 运行结果: -``` +```shell +----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+--------------------------------------------------------------------+ |avg(root.ln.wf01.wt01.temperature)|sin(avg(root.ln.wf01.wt01.temperature))|avg(root.ln.wf01.wt01.temperature) + 1|-sum(root.ln.wf01.wt01.hardware)|avg(root.ln.wf01.wt01.temperature) + sum(root.ln.wf01.wt01.hardware)| 
+----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+--------------------------------------------------------------------+ @@ -553,12 +553,12 @@ It costs 0.009s ```sql select avg(*), (avg(*) + 1) * 3 / 2 -1 -from root.sg1 +from root.sg1; ``` 运行结果: -``` +```shell +---------------+---------------+-------------------------------------+-------------------------------------+ |avg(root.sg1.a)|avg(root.sg1.b)|(avg(root.sg1.a) + 1) * 3 / 2 - 1 |(avg(root.sg1.b) + 1) * 3 / 2 - 1 | +---------------+---------------+-------------------------------------+-------------------------------------+ @@ -582,7 +582,7 @@ GROUP BY([10, 90), 10ms); 运行结果: -``` +```shell +-----------------------------+----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+----------+ | Time|avg(root.ln.wf01.wt01.temperature)|sin(avg(root.ln.wf01.wt01.temperature))|avg(root.ln.wf01.wt01.temperature) + 1|-sum(root.ln.wf01.wt01.hardware)|custom_sum| +-----------------------------+----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+----------+ @@ -617,7 +617,7 @@ select last [COMMA ]* from < PrefixPath > [COMMA < PrefixPath >]* < - 结果集为四列的结构: - ``` + ```shell +----+----------+-----+--------+ |Time|timeseries|value|dataType| +----+----------+-----+--------+ @@ -627,8 +627,10 @@ select last [COMMA ]* from < PrefixPath > [COMMA < PrefixPath >]* < **示例 1:** 查询 root.ln.wf01.wt01.status 的最新数据点 +```sql +select last status from root.ln.wf01.wt01; ``` -IoTDB> select last status from root.ln.wf01.wt01 +```shell +-----------------------------+------------------------+-----+--------+ | Time| timeseries|value|dataType| +-----------------------------+------------------------+-----+--------+ @@ -640,8 +642,10 @@ It costs 0.000s **示例 2:** 查询 
root.ln.wf01.wt01 下 status,temperature 时间戳大于等于 2017-11-07T23:50:00 的最新数据点。 +```sql +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; ``` -IoTDB> select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +```shell +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -654,8 +658,10 @@ It costs 0.002s **示例 3:** 查询 root.ln.wf01.wt01 下所有序列的最新数据点,并按照序列名降序排列。 +```sql +select last * from root.ln.wf01.wt01 order by timeseries desc; ``` -IoTDB> select last * from root.ln.wf01.wt01 order by timeseries desc; +```shell +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -668,8 +674,10 @@ It costs 0.002s **示例 4:** 查询 root.ln.wf01.wt01 下所有序列的最新数据点,并按照dataType降序排列。 +```sql +select last * from root.ln.wf01.wt01 order by dataType desc; ``` -IoTDB> select last * from root.ln.wf01.wt01 order by dataType desc; +```shell +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -682,8 +690,10 @@ It costs 0.002s **注意:** 可以通过函数组合方式实现其他过滤条件查询最新点的需求,例如 +```sql +select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device; ``` -IoTDB> select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device +```shell +-----------------+---------------------+----------------+-----------------------+------------------+ | Device|max_time(temperature)|max_time(status)|last_value(temperature)|last_value(status)| 
+-----------------+---------------------+----------------+-----------------------+------------------+ @@ -705,7 +715,7 @@ It costs 0.021s ### 3.1 时间过滤条件 -使用时间过滤条件可以筛选特定时间范围的数据。对于时间戳支持的格式,请参考 [时间戳类型](../Background-knowledge/Data-Type_apache.md) 。 +使用时间过滤条件可以筛选特定时间范围的数据。对于时间戳支持的格式,请参考 [时间戳类型](../Background-knowledge/Data-Type.md) 。 示例如下: @@ -793,8 +803,10 @@ It costs 0.021s **示例 1:** 查询 `root.sg.d1` 下 `value` 含有`'cc'`的数据。 +```sql +select * from root.sg.d1 where value like '%cc%'; ``` -IoTDB> select * from root.sg.d1 where value like '%cc%' +```shell +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -807,8 +819,10 @@ It costs 0.002s **示例 2:** 查询 `root.sg.d1` 下 `value` 中间为 `'b'`、前后为任意单个字符的数据。 +```sql +select * from root.sg.d1 where value like '_b_'; ``` -IoTDB> select * from root.sg.device where value like '_b_' +```shell +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -824,7 +838,7 @@ It costs 0.002s **常见的正则匹配举例:** -``` +```shell 长度为3-20的所有字符:^.{3,20}$ 大写英文字符:^[A-Z]+$ 数字和英文字符:^[A-Za-z0-9]+$ @@ -833,8 +847,10 @@ It costs 0.002s **示例 1:** 查询 root.sg.d1 下 value 值为26个英文字符组成的字符串。 +```sql +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +``` ```shell -IoTDB> select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -847,8 +863,10 @@ It costs 0.002s **示例 2:** 查询 root.sg.d1 下 value 值为26个小写英文字符组成的字符串且时间大于100的。 +```sql +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; +``` ```shell -IoTDB> select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -903,7 +921,7 @@ select count(status), max_value(temperature) from root.ln.wf01.wt01 
group by ([2 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -943,7 +961,7 @@ select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -968,7 +986,7 @@ select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2 SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -1008,7 +1026,7 @@ select count(status) from root.ln.wf01.wt01 where time > 2017-11-01T01:00:00 gro 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+ | Time|count(root.ln.wf01.wt01.status)| +-----------------------------+-------------------------------+ @@ -1052,7 +1070,7 @@ select count(status) from root.ln.wf01.wt01 group by([2017-10-31T00:00:00, 2019- 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+ | Time|count(root.ln.wf01.wt01.status)| +-----------------------------+-------------------------------+ @@ -1084,7 +1102,7 @@ select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017 SQL 执行后的结果集如下所示: -``` +```shell 
+-----------------------------+-------------------------------+ | Time|count(root.ln.wf01.wt01.status)| +-----------------------------+-------------------------------+ @@ -1139,7 +1157,7 @@ group by variation(controlExpression[,delta][,ignoreNull=true/false]) 5. 当前暂不支持与`GROUP BY LEVEL`搭配使用。 使用如下的原始数据,接下来会给出几个事件分段查询的使用样例 -``` +```shell +-----------------------------+-------+-------+-------+--------+-------+-------+ | Time| s1| s2| s3| s4| s5| s6| +-----------------------------+-------+-------+-------+--------+-------+-------+ @@ -1159,10 +1177,10 @@ group by variation(controlExpression[,delta][,ignoreNull=true/false]) ##### delta=0时的等值事件分段 使用如下sql语句 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6); ``` 得到如下的查询结果,这里忽略了s6为null的行 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1174,10 +1192,10 @@ select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation( ``` 当指定ignoreNull为false时,会将s6为null的数据也考虑进来 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false); ``` 得到如下的结果 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1193,10 +1211,10 @@ select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation( 
##### delta!=0时的差值事件分段 使用如下sql语句 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4); ``` 得到如下的查询结果 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1208,10 +1226,10 @@ select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation( group by子句中的controlExpression同样支持列的表达式 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10); ``` 得到如下的查询结果 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1247,7 +1265,7 @@ keep表达式用来指定形成分组所需要连续满足`predict`条件的数 对于如下原始数据,下面会给出几个查询样例: -``` +```shell +-----------------------------+-------------------------+-------------------------------------+------------------------------------+ | Time|root.sg.beijing.car01.soc|root.sg.beijing.car01.charging_status|root.sg.beijing.car01.vehicle_status| +-----------------------------+-------------------------+-------------------------------------+------------------------------------+ @@ -1265,10 +1283,10 @@ keep表达式用来指定形成分组所需要连续满足`predict`条件的数 ``` 查询至少连续两行以上的charging_status=1的数据,sql语句如下: ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true) +select 
max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true); ``` 得到结果如下: -``` +```shell +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ | Time|max_time(root.sg.beijing.car01.charging_status)|count(root.sg.beijing.car01.vehicle_status)|last_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ @@ -1278,10 +1296,10 @@ select max_time(charging_status),count(vehicle_status),last_value(soc) from root ``` 当设置`ignoreNull`为false时,遇到null值为将其视为一个不满足条件的行,会结束正在计算的分组。 ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false); ``` 得到如下结果,原先的分组被含null的行拆分: -``` +```shell +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ | Time|max_time(root.sg.beijing.car01.charging_status)|count(root.sg.beijing.car01.vehicle_status)|last_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ @@ -1310,7 +1328,7 @@ group by session(timeInterval) 3. 
当前暂不支持与`GROUP BY LEVEL`搭配使用。 对于下面的原始数据,给出几个查询样例。 -``` +```shell +-----------------------------+-----------------+-----------+--------+------+ | Time| Device|temperature|hardware|status| +-----------------------------+-----------------+-----------+--------+------+ @@ -1341,10 +1359,10 @@ group by session(timeInterval) ``` 可以按照不同的时间单位设定时间间隔,sql语句如下: ```sql -select __endTime,count(*) from root.** group by session(1d) +select __endTime,count(*) from root.** group by session(1d); ``` 得到如下结果: -``` +```shell +-----------------------------+-----------------------------+------------------------------------+---------------------------------+-------------------------------+ | Time| __endTime|count(root.ln.wf02.wt01.temperature)|count(root.ln.wf02.wt01.hardware)|count(root.ln.wf02.wt01.status)| +-----------------------------+-----------------------------+------------------------------------+---------------------------------+-------------------------------+ @@ -1354,10 +1372,10 @@ select __endTime,count(*) from root.** group by session(1d) ``` 也可以和`HAVING`、`ALIGN BY DEVICE`共同使用 ```sql -select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device +select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device; ``` 得到如下结果,其中排除了`sum(hardware)`为0的部分 -``` +```shell +-----------------------------+-----------------+-----------------------------+-------------+ | Time| Device| __endTime|sum(hardware)| +-----------------------------+-----------------+-----------------------------+-------------+ @@ -1392,7 +1410,7 @@ group by count(controlExpression, size[,ignoreNull=true/false]) 4. 
当一个分组内最终的点数不满足`size`的数量时,不会输出该分组的结果 对于下面的原始数据,给出几个查询样例。 -``` +```shell +-----------------------------+-----------+-----------------------+ | Time|root.sg.soc|root.sg.charging_status| +-----------------------------+-----------+-----------------------+ @@ -1410,10 +1428,10 @@ group by count(controlExpression, size[,ignoreNull=true/false]) ``` sql语句如下 ```sql -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5) +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5); ``` 得到如下结果,其中由于第二个1970-01-01T08:00:00.006+08:00到1970-01-01T08:00:00.010+08:00的窗口中包含四个点,不符合`size = 5`的条件,因此不被输出 -``` +```shell +-----------------------------+-----------------------------+--------------------------------------+ | Time| __endTime|first_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------+--------------------------------------+ @@ -1422,10 +1440,10 @@ select count(charging_stauts), first_value(soc) from root.sg group by count(char ``` 而当使用ignoreNull将null值也考虑进来时,可以得到两个点计数为5的窗口,sql如下 ```sql -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false) +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false); ``` 得到如下结果 -``` +```shell +-----------------------------+-----------------------------+--------------------------------------+ | Time| __endTime|first_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------+--------------------------------------+ @@ -1445,12 +1463,12 @@ select count(charging_stauts), first_value(soc) from root.sg group by count(char **示例1:** 不同 database 下均存在名为 status 的序列, 如 "root.ln.wf01.wt01.status", "root.ln.wf02.wt02.status", 以及 "root.sgcc.wf03.wt01.status", 如果需要统计不同 database 下 status 序列的数据点个数,使用以下查询: ```sql -select count(status) from root.** group by level = 1 +select count(status) from root.** group by 
level = 1; ``` 运行结果为: -``` +```shell +-------------------------+---------------------------+ |count(root.ln.*.*.status)|count(root.sgcc.*.*.status)| +-------------------------+---------------------------+ @@ -1463,12 +1481,12 @@ It costs 0.003s **示例2:** 统计不同设备下 status 序列的数据点个数,可以规定 level = 3, ```sql -select count(status) from root.** group by level = 3 +select count(status) from root.** group by level = 3; ``` 运行结果为: -``` +```shell +---------------------------+---------------------------+ |count(root.*.*.wt01.status)|count(root.*.*.wt02.status)| +---------------------------+---------------------------+ @@ -1483,12 +1501,12 @@ It costs 0.003s **示例3:** 统计不同 database 下的不同设备中 status 序列的数据点个数,可以使用以下查询: ```sql -select count(status) from root.** group by level = 1, 3 +select count(status) from root.** group by level = 1, 3; ``` 运行结果为: -``` +```shell +----------------------------+----------------------------+------------------------------+ |count(root.ln.*.wt01.status)|count(root.ln.*.wt02.status)|count(root.sgcc.*.wt01.status)| +----------------------------+----------------------------+------------------------------+ @@ -1501,12 +1519,12 @@ It costs 0.003s **示例4:** 查询所有序列下温度传感器 temperature 的最大值,可以使用下列查询语句: ```sql -select max_value(temperature) from root.** group by level = 0 +select max_value(temperature) from root.** group by level = 0; ``` 运行结果: -``` +```shell +---------------------------------+ |max_value(root.*.*.*.temperature)| +---------------------------------+ @@ -1519,12 +1537,12 @@ It costs 0.013s **示例5:** 上面的查询都是针对某一个传感器,特别地,**如果想要查询某一层级下所有传感器拥有的总数据点数,则需要显式规定测点为 `*`** ```sql -select count(*) from root.ln.** group by level = 2 +select count(*) from root.ln.** group by level = 2; ``` 运行结果: -``` +```shell +----------------------+----------------------+ |count(root.*.wf01.*.*)|count(root.*.wf02.*.*)| +----------------------+----------------------+ @@ -1548,7 +1566,7 @@ select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017 结果: -``` 
+```shell +-----------------------------+-------------------------+ | Time|COUNT(root.ln.*.*.status)| +-----------------------------+-------------------------+ @@ -1570,7 +1588,7 @@ It costs 0.006s select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d), level=1; ``` -``` +```shell +-----------------------------+-------------------------+ | Time|COUNT(root.ln.*.*.status)| +-----------------------------+-------------------------+ @@ -1661,7 +1679,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY TAGS(city); 该查询会将具有同一个 `city` 标签值的时间序列的所有满足查询条件的点做平均值计算,计算结果如下 -``` +```shell +--------+------------------+ | city| avg(temperature)| +--------+------------------+ @@ -1692,7 +1710,7 @@ SELECT avg(temperature) FROM root.factory1.** GROUP BY TAGS(city, workshop); 查询结果如下 -``` +```shell +--------+--------+------------------+ | city|workshop| avg(temperature)| +--------+--------+------------------+ @@ -1722,7 +1740,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 查询结果如下 -``` +```shell +-----------------------------+--------+--------+------------------+ | Time| city|workshop| avg(temperature)| +-----------------------------+--------+--------+------------------+ @@ -1762,16 +1780,16 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 下列使用方式是不正确的: ```sql - select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 - select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 + select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; + select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; ``` 2. 
对`GROUP BY LEVEL`结果进行过滤时,`SELECT`和`HAVING`中出现的PATH只能有一级。 下列使用方式是不正确的: ```sql - select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 - select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 + select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; + select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; ``` **SQL 示例:** @@ -1780,7 +1798,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 对于以下聚合结果进行过滤: - ``` + ```shell +-----------------------------+---------------------+---------------------+ | Time|count(root.test.*.s1)|count(root.test.*.s2)| +-----------------------------+---------------------+---------------------+ @@ -1798,7 +1816,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 执行结果如下: - ``` + ```shell +-----------------------------+---------------------+ | Time|count(root.test.*.s1)| +-----------------------------+---------------------+ @@ -1811,7 +1829,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS - **示例 2:** 对于以下聚合结果进行过滤: - ``` + ```shell +-----------------------------+-------------+---------+---------+ | Time| Device|count(s1)|count(s2)| +-----------------------------+-------------+---------+---------+ @@ -1834,7 +1852,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 执行结果如下: - ``` + ```shell +-----------------------------+-------------+---------+---------+ | Time| Device|count(s1)|count(s2)| +-----------------------------+-------------+---------+---------+ @@ -1899,7 +1917,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: 查询结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| 
+-----------------------------+-------------------------------+--------------------------+ @@ -1928,7 +1946,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `PREVIOUS` 填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| +-----------------------------+-------------------------------+--------------------------+ @@ -1950,9 +1968,9 @@ Total line number = 4 例如,原始数据如下所示: ```sql -select s1 from root.db.d1 -``` +select s1 from root.db.d1; ``` +```shell +-----------------------------+-------------+ | Time|root.db.d1.s1| +-----------------------------+-------------+ @@ -1969,9 +1987,9 @@ select s1 from root.db.d1 ```sql select avg(s1) from root.db.d1 - group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m) -``` + group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m); ``` +```shell +-----------------------------+------------------+ | Time|avg(root.db.d1.s1)| +-----------------------------+------------------+ @@ -2005,7 +2023,7 @@ select avg(s1) group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m) FILL(PREVIOUS); ``` -``` +```shell +-----------------------------+------------------+ | Time|avg(root.db.d1.s1)| +-----------------------------+------------------+ @@ -2039,7 +2057,7 @@ from root.db.d1 group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m) FILL(PREVIOUS, 2m); ``` -``` +```shell +-----------------------------+------------------+ | Time|avg(root.db.d1.s1)| +-----------------------------+------------------+ @@ -2082,7 +2100,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `LINEAR` 填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| 
+-----------------------------+-------------------------------+--------------------------+ @@ -2120,7 +2138,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `FLOAT` 类型的常量填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| +-----------------------------+-------------------------------+--------------------------+ @@ -2143,7 +2161,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `BOOLEAN` 类型的常量填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| +-----------------------------+-------------------------------+--------------------------+ @@ -2182,7 +2200,7 @@ Total line number = 4 SQL 语句: ```sql -select status, temperature from root.ln.wf01.wt01 limit 10 +select status, temperature from root.ln.wf01.wt01 limit 10; ``` 含义: @@ -2191,7 +2209,7 @@ select status, temperature from root.ln.wf01.wt01 limit 10 结果如下所示: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -2215,7 +2233,7 @@ It costs 0.000s SQL 语句: ```sql -select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 +select status, temperature from root.ln.wf01.wt01 limit 5 offset 3; ``` 含义: @@ -2224,7 +2242,7 @@ select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 结果如下所示: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -2243,7 +2261,7 @@ It costs 0.342s SQL 语句: 
```sql -select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:00.000 and time< 2024-07-12T00:12:00.000 limit 5 offset 3 +select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:00.000 and time< 2024-07-12T00:12:00.000 limit 5 offset 3; ``` 含义: @@ -2252,7 +2270,7 @@ select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:0 结果如下所示: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -2264,14 +2282,14 @@ select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:0 +-----------------------------+------------------------+-----------------------------+ Total line number = 5 It costs 0.070s -`` +``` - **示例 4:** `LIMIT` 子句与 `GROUP BY` 子句组合 SQL 语句: ```sql -select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3 +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3; ``` 含义: @@ -2280,7 +2298,7 @@ SQL 语句子句要求返回查询结果的第 3 至 6 行(第一行编号为 结果如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -2310,7 +2328,7 @@ It costs 0.016s SQL 语句: ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1; ``` 含义: @@ -2319,7 +2337,7 @@ select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and 
time < 结果如下所示: -``` +```shell +-----------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.temperature| +-----------------------------+-----------------------------+ @@ -2339,7 +2357,7 @@ It costs 0.000s SQL 语句: ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1; ``` 含义: @@ -2348,7 +2366,7 @@ select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 结果如下所示: -``` +```shell +-----------------------------+------------------------+ | Time|root.ln.wf01.wt01.status| +-----------------------------+------------------------+ @@ -2368,12 +2386,12 @@ It costs 0.003s SQL 语句: ```sql -select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1 +select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1; ``` 含义: -``` +```shell +-----------------------------+-----------------------------------+ | Time|max_value(root.ln.wf01.wt01.status)| +-----------------------------+-----------------------------------+ @@ -2394,7 +2412,7 @@ It costs 0.000s SQL 语句: ```sql -select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 +select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0; ``` 含义: @@ -2403,7 +2421,7 @@ select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 结果如下所示: -``` +```shell +-----------------------------+-----------------------------+------------------------+ | Time|root.ln.wf01.wt01.temperature|root.ln.wf01.wt01.status| +-----------------------------+-----------------------------+------------------------+ @@ -2431,7 +2449,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time desc; ``` 执行结果: -``` +```shell 
+-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ | Time|root.ln.wf02.wt02.hardware|root.ln.wf02.wt02.status|root.ln.wf01.wt01.temperature|root.ln.wf01.wt01.status| +-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ @@ -2463,7 +2481,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc, ``` 执行结果: -``` +```shell +-----------------------------+-----------------+--------+------+-----------+ | Time| Device|hardware|status|temperature| +-----------------------------+-----------------+--------+------+-----------+ @@ -2481,7 +2499,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc, select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,device desc align by device; ``` 执行结果: -``` +```shell +-----------------------------+-----------------+--------+------+-----------+ | Time| Device|hardware|status|temperature| +-----------------------------+-----------------+--------+------+-----------+ @@ -2498,7 +2516,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,dev select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` 结果如图所示,可以看出,`ORDER BY DEVICE ASC,TIME ASC`就是默认情况下的排序方式,由于`ASC`是默认排序顺序,此处可以省略。 -``` +```shell +-----------------------------+-----------------+--------+------+-----------+ | Time| Device|hardware|status|temperature| +-----------------------------+-----------------+--------+------+-----------+ @@ -2512,10 +2530,10 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` 同样,可以在聚合查询中使用`ALIGN BY DEVICE`和`ORDER BY`子句,对聚合后的结果进行排序,示例代码如下所示: ```sql -select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select count(*) from root.ln.** 
group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` 执行结果: -``` +```shell +-----------------------------+-----------------+---------------+-------------+------------------+ | Time| Device|count(hardware)|count(status)|count(temperature)| +-----------------------------+-----------------+---------------+-------------+------------------+ @@ -2534,7 +2552,7 @@ select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11 排序在通过`ASC`,`DESC`指定排序顺序的同时,可以通过`NULLS`语法来指定NULL值在排序中的优先级,`NULLS FIRST`默认NULL值在结果集的最上方,`NULLS LAST`则保证NULL值在结果集的最后。如果没有在子句中指定,则默认顺序为`ASC`,`NULLS LAST`。 对于如下的数据,将给出几个任意表达式的查询示例供参考: -``` +```shell +-----------------------------+-------------+-------+-------+--------+-------+ | Time| Device| base| score| bonus| total| +-----------------------------+-------------+-------+-------+--------+-------+ @@ -2555,11 +2573,11 @@ select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11 当需要根据基础分数score对结果进行排序时,可以直接使用 ```Sql -select score from root.** order by score desc align by device +select score from root.** order by score desc align by device; ``` 会得到如下结果 -``` +```shell +-----------------------------+---------+-----+ | Time| Device|score| +-----------------------------+---------+-----+ @@ -2578,15 +2596,15 @@ select score from root.** order by score desc align by device 当想要根据总分对结果进行排序,可以在order by子句中使用表达式进行计算 ```Sql -select score,total from root.one order by base+score+bonus desc +select score,total from root.one order by base+score+bonus desc; ``` 该sql等价于 ```Sql -select score,total from root.one order by total desc +select score,total from root.one order by total desc; ``` 得到如下结果 -``` +```shell +-----------------------------+--------------+--------------+ | Time|root.one.score|root.one.total| +-----------------------------+--------------+--------------+ @@ -2601,10 +2619,10 @@ select score,total from root.one order by total desc select base, score, 
bonus, total from root.** order by total desc NULLS Last, score desc NULLS Last, bonus desc NULLS Last, - time desc align by device + time desc align by device; ``` 得到如下结果 -``` +```shell +-----------------------------+----------+----+-----+-----+-----+ | Time| Device|base|score|bonus|total| +-----------------------------+----------+----+-----+-----+-----+ @@ -2625,10 +2643,10 @@ select base, score, bonus, total from root.** order by total desc NULLS Last, ``` 在order by中同样可以使用聚合查询表达式 ```Sql -select min_value(total) from root.** order by min_value(total) asc align by device +select min_value(total) from root.** order by min_value(total) asc align by device; ``` 得到如下结果 -``` +```shell +----------+----------------+ | Device|min_value(total)| +----------+----------------+ @@ -2641,11 +2659,11 @@ select min_value(total) from root.** order by min_value(total) asc align by devi ``` 当在查询中指定多列,未被排序的列会随着行和排序列一起改变顺序,当排序列相同时行的顺序和具体实现有关(没有固定顺序) ```Sql -select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device +select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device; ``` 得到结果如下 -· -``` + +```shell +----------+----------------+---------------+ | Device|min_value(total)|max_value(base)| +----------+----------------+---------------+ @@ -2659,10 +2677,10 @@ select min_value(total),max_value(base) from root.** order by max_value(total) d Order by device, time可以和order by expression共同使用 ```Sql -select score from root.** order by device asc, score desc, time asc align by device +select score from root.** order by device asc, score desc, time asc align by device; ``` 会得到如下结果 -``` +```shell +-----------------------------+---------+-----+ | Time| Device|score| +-----------------------------+---------+-----+ @@ -2705,7 +2723,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; 执行如下: -``` +```shell +-----------------------------+-----------------+-----------+------+--------+ | 
Time| Device|temperature|status|hardware| +-----------------------------+-----------------+-----------+------+--------+ @@ -2772,8 +2790,10 @@ intoItem 下面通过示例进一步说明: - **示例 1**(按时间对齐) +```sql +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +``` ```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +--------------+-------------------+--------+ | source column| target timeseries| written| +--------------+-------------------+--------+ @@ -2799,8 +2819,10 @@ It costs 0.725s > - `written` 表示预期写入的数据量。 - **示例 2**(按时间对齐) +```sql +select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +``` ```shell -IoTDB> select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +--------------------------------------+-------------------------+--------+ | source column| target timeseries| written| +--------------------------------------+-------------------------+--------+ @@ -2815,8 +2837,10 @@ It costs 0.375s 该语句将聚合查询的结果存储到指定序列中。 - **示例 3**(按设备对齐) +```sql +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +``` ```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +--------------+--------------+-------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+-------------------+--------+ @@ -2837,8 +2861,10 @@ It costs 0.625s > 按设备对齐查询时,`CLI` 展示的结果集多出一列 `source device` 列表示查询的设备。 - **示例 4**(按设备对齐) +```sql +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +``` ```shell -IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), 
root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +--------------+--------------+------------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+------------------------+--------+ @@ -2983,8 +3009,10 @@ select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from #### 实现 IoTDB 内部 ETL 对原始数据进行 ETL 处理后写入新序列。 +```sql +SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEVICE; +``` ```shell -IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEIVCE; +--------------+-------------------+---------------------------+--------+ | source device| source column| target timeseries| written| +--------------+-------------------+---------------------------+--------+ @@ -3001,8 +3029,10 @@ IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) #### 查询结果存储 将查询结果进行持久化存储,起到类似物化视图的作用。 +```sql +SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +``` ```shell -IOTDB > SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +--------------------------+-----------------------------+--------+ | source column| target timeseries| written| +--------------------------+-----------------------------+--------+ @@ -3020,8 +3050,10 @@ It costs 0.115s **注意:** 建议配合使用 `LIMIT & OFFSET` 子句或 `WHERE` 子句(时间过滤条件)对数据进行分批,防止单次操作的数据量过大。 +```sql +SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +``` ```shell -IOTDB > SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +--------------------------+----------------------+--------+ | source column| target timeseries| written| 
+--------------------------+----------------------+--------+ diff --git a/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md b/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md index 00becb9df..c90a119ec 100644 --- a/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md +++ b/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md @@ -126,7 +126,7 @@ select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.temperature| +-----------------------------+-----------------------------+ @@ -157,7 +157,7 @@ select status, temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05: 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -188,7 +188,7 @@ select status, temperature from root.ln.wf01.wt01 where (time > 2017-11-01T00:05 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -220,7 +220,7 @@ select wf01.wt01.status, wf02.wt02.hardware from root.ln where (time > 2017-11-0 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+------------------------+--------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf02.wt02.hardware| +-----------------------------+------------------------+--------------------------+ @@ -248,7 +248,7 @@ select * from root.ln.** where time > 1 order by time desc limit 10; 语句执行的结果为: -``` +```shell 
+-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ | Time|root.ln.wf02.wt02.hardware|root.ln.wf02.wt02.status|root.ln.wf01.wt01.temperature|root.ln.wf01.wt01.status| +-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ @@ -428,7 +428,7 @@ from root.sg1; 运行结果: -``` +```shell +-----------------------------+----------+----------+----------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Time|root.sg1.a|root.sg1.b|((((root.sg1.a + 1) * 2) - 1) % 2) + 1.5|sin(root.sg1.a + sin(root.sg1.a + sin(root.sg1.b)))|(-root.sg1.a + root.sg1.b * ((sin(root.sg1.a + root.sg1.b) * sin(root.sg1.a + root.sg1.b)) + (cos(root.sg1.a + root.sg1.b) * cos(root.sg1.a + root.sg1.b)))) + 1| +-----------------------------+----------+----------+----------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -446,12 +446,12 @@ It costs 0.048s **示例 2:** ```sql -select (a + b) * 2 + sin(a) from root.sg +select (a + b) * 2 + sin(a) from root.sg; ``` 运行结果: -``` +```shell +-----------------------------+----------------------------------------------+ | Time|((root.sg.a + root.sg.b) * 2) + sin(root.sg.a)| +-----------------------------+----------------------------------------------+ @@ -472,12 +472,12 @@ It costs 0.011s **示例 3:** ```sql -select (a + *) / 2 from root.sg1 +select (a + *) / 2 from root.sg1; ``` 运行结果: -``` +```shell +-----------------------------+-----------------------------+-----------------------------+ | Time|(root.sg1.a + 
root.sg1.a) / 2|(root.sg1.a + root.sg1.b) / 2| +-----------------------------+-----------------------------+-----------------------------+ @@ -494,12 +494,12 @@ It costs 0.011s **示例 4:** ```sql -select (a + b) * 3 from root.sg, root.ln +select (a + b) * 3 from root.sg, root.ln; ``` 运行结果: -``` +```shell +-----------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ | Time|(root.sg.a + root.sg.b) * 3|(root.sg.a + root.ln.b) * 3|(root.ln.a + root.sg.b) * 3|(root.ln.a + root.ln.b) * 3| +-----------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ @@ -538,7 +538,7 @@ from root.ln.wf01.wt01; 运行结果: -``` +```shell +----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+--------------------------------------------------------------------+ |avg(root.ln.wf01.wt01.temperature)|sin(avg(root.ln.wf01.wt01.temperature))|avg(root.ln.wf01.wt01.temperature) + 1|-sum(root.ln.wf01.wt01.hardware)|avg(root.ln.wf01.wt01.temperature) + sum(root.ln.wf01.wt01.hardware)| +----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+--------------------------------------------------------------------+ @@ -553,12 +553,12 @@ It costs 0.009s ```sql select avg(*), (avg(*) + 1) * 3 / 2 -1 -from root.sg1 +from root.sg1; ``` 运行结果: -``` +```shell +---------------+---------------+-------------------------------------+-------------------------------------+ |avg(root.sg1.a)|avg(root.sg1.b)|(avg(root.sg1.a) + 1) * 3 / 2 - 1 |(avg(root.sg1.b) + 1) * 3 / 2 - 1 | +---------------+---------------+-------------------------------------+-------------------------------------+ @@ -582,7 +582,7 @@ GROUP BY([10, 90), 10ms); 运行结果: -``` +```shell 
+-----------------------------+----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+----------+ | Time|avg(root.ln.wf01.wt01.temperature)|sin(avg(root.ln.wf01.wt01.temperature))|avg(root.ln.wf01.wt01.temperature) + 1|-sum(root.ln.wf01.wt01.hardware)|custom_sum| +-----------------------------+----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+----------+ @@ -617,7 +617,7 @@ select last [COMMA ]* from < PrefixPath > [COMMA < PrefixPath >]* < - 结果集为四列的结构: - ``` + ```shell +----+----------+-----+--------+ |Time|timeseries|value|dataType| +----+----------+-----+--------+ @@ -627,8 +627,10 @@ select last [COMMA ]* from < PrefixPath > [COMMA < PrefixPath >]* < **示例 1:** 查询 root.ln.wf01.wt01.status 的最新数据点 +```sql + select last status from root.ln.wf01.wt01; ``` -IoTDB> select last status from root.ln.wf01.wt01 +```shell +-----------------------------+------------------------+-----+--------+ | Time| timeseries|value|dataType| +-----------------------------+------------------------+-----+--------+ @@ -640,8 +642,10 @@ It costs 0.000s **示例 2:** 查询 root.ln.wf01.wt01 下 status,temperature 时间戳大于等于 2017-11-07T23:50:00 的最新数据点。 +```sql + select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; ``` -IoTDB> select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +```shell +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -654,8 +658,10 @@ It costs 0.002s **示例 3:** 查询 root.ln.wf01.wt01 下所有序列的最新数据点,并按照序列名降序排列。 +```sql + select last * from root.ln.wf01.wt01 order by timeseries desc; ``` -IoTDB> select last * from root.ln.wf01.wt01 order by timeseries desc; +```shell 
+-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -668,8 +674,10 @@ It costs 0.002s **示例 4:** 查询 root.ln.wf01.wt01 下所有序列的最新数据点,并按照dataType降序排列。 +```sql + select last * from root.ln.wf01.wt01 order by dataType desc; ``` -IoTDB> select last * from root.ln.wf01.wt01 order by dataType desc; +```shell +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -682,8 +690,10 @@ It costs 0.002s **注意:** 可以通过函数组合方式实现其他过滤条件查询最新点的需求,例如 +```sql + select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device; ``` -IoTDB> select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device +```shell +-----------------+---------------------+----------------+-----------------------+------------------+ | Device|max_time(temperature)|max_time(status)|last_value(temperature)|last_value(status)| +-----------------+---------------------+----------------+-----------------------+------------------+ @@ -705,7 +715,7 @@ It costs 0.021s ### 3.1 时间过滤条件 -使用时间过滤条件可以筛选特定时间范围的数据。对于时间戳支持的格式,请参考 [时间戳类型](../Background-knowledge/Data-Type_timecho.md) 。 +使用时间过滤条件可以筛选特定时间范围的数据。对于时间戳支持的格式,请参考 [时间戳类型](../Background-knowledge/Data-Type.md) 。 示例如下: @@ -793,8 +803,10 @@ It costs 0.021s **示例 1:** 查询 `root.sg.d1` 下 `value` 含有`'cc'`的数据。 +```sql + select * from root.sg.d1 where value like '%cc%'; ``` -IoTDB> select * from root.sg.d1 where value like '%cc%' +```shell +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -807,8 +819,10 @@ It costs 0.002s **示例 2:** 查询 `root.sg.d1` 下 `value` 中间为 `'b'`、前后为任意单个字符的数据。 +```sql + select * 
from root.sg.device where value like '_b_'; ``` -IoTDB> select * from root.sg.device where value like '_b_' +```shell +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -824,7 +838,7 @@ It costs 0.002s **常见的正则匹配举例:** -``` +```shell 长度为3-20的所有字符:^.{3,20}$ 大写英文字符:^[A-Z]+$ 数字和英文字符:^[A-Za-z0-9]+$ @@ -833,8 +847,10 @@ It costs 0.002s **示例 1:** 查询 root.sg.d1 下 value 值为26个英文字符组成的字符串。 +```sql + select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +``` ```shell -IoTDB> select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -847,8 +863,10 @@ It costs 0.002s **示例 2:** 查询 root.sg.d1 下 value 值为26个小写英文字符组成的字符串且时间大于100的。 +```sql + select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; +``` ```shell -IoTDB> select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -903,7 +921,7 @@ select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -943,7 +961,7 @@ select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ 
-968,7 +986,7 @@ select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2 SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -1008,7 +1026,7 @@ select count(status) from root.ln.wf01.wt01 where time > 2017-11-01T01:00:00 gro 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+ | Time|count(root.ln.wf01.wt01.status)| +-----------------------------+-------------------------------+ @@ -1052,7 +1070,7 @@ select count(status) from root.ln.wf01.wt01 group by([2017-10-31T00:00:00, 2019- 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+ | Time|count(root.ln.wf01.wt01.status)| +-----------------------------+-------------------------------+ @@ -1084,7 +1102,7 @@ select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017 SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+ | Time|count(root.ln.wf01.wt01.status)| +-----------------------------+-------------------------------+ @@ -1139,7 +1157,7 @@ group by variation(controlExpression[,delta][,ignoreNull=true/false]) 5. 
当前暂不支持与`GROUP BY LEVEL`搭配使用。 使用如下的原始数据,接下来会给出几个事件分段查询的使用样例 -``` +```shell +-----------------------------+-------+-------+-------+--------+-------+-------+ | Time| s1| s2| s3| s4| s5| s6| +-----------------------------+-------+-------+-------+--------+-------+-------+ @@ -1159,10 +1177,10 @@ group by variation(controlExpression[,delta][,ignoreNull=true/false]) ##### delta=0时的等值事件分段 使用如下sql语句 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6); ``` 得到如下的查询结果,这里忽略了s6为null的行 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1174,10 +1192,10 @@ select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation( ``` 当指定ignoreNull为false时,会将s6为null的数据也考虑进来 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false); ``` 得到如下的结果 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1193,10 +1211,10 @@ select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation( ##### delta!=0时的差值事件分段 使用如下sql语句 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4); ``` 得到如下的查询结果 -``` +```shell 
+-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1208,10 +1226,10 @@ select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation( group by子句中的controlExpression同样支持列的表达式 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10); ``` 得到如下的查询结果 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1247,7 +1265,7 @@ keep表达式用来指定形成分组所需要连续满足`predict`条件的数 对于如下原始数据,下面会给出几个查询样例: -``` +```shell +-----------------------------+-------------------------+-------------------------------------+------------------------------------+ | Time|root.sg.beijing.car01.soc|root.sg.beijing.car01.charging_status|root.sg.beijing.car01.vehicle_status| +-----------------------------+-------------------------+-------------------------------------+------------------------------------+ @@ -1265,10 +1283,10 @@ keep表达式用来指定形成分组所需要连续满足`predict`条件的数 ``` 查询至少连续两行以上的charging_status=1的数据,sql语句如下: ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true); ``` 得到结果如下: -``` +```shell 
+-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ | Time|max_time(root.sg.beijing.car01.charging_status)|count(root.sg.beijing.car01.vehicle_status)|last_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ @@ -1278,10 +1296,10 @@ select max_time(charging_status),count(vehicle_status),last_value(soc) from root ``` 当设置`ignoreNull`为false时,遇到null值为将其视为一个不满足条件的行,会结束正在计算的分组。 ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false); ``` 得到如下结果,原先的分组被含null的行拆分: -``` +```shell +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ | Time|max_time(root.sg.beijing.car01.charging_status)|count(root.sg.beijing.car01.vehicle_status)|last_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ @@ -1310,7 +1328,7 @@ group by session(timeInterval) 3. 
当前暂不支持与`GROUP BY LEVEL`搭配使用。 对于下面的原始数据,给出几个查询样例。 -``` +```shell +-----------------------------+-----------------+-----------+--------+------+ | Time| Device|temperature|hardware|status| +-----------------------------+-----------------+-----------+--------+------+ @@ -1341,10 +1359,10 @@ group by session(timeInterval) ``` 可以按照不同的时间单位设定时间间隔,sql语句如下: ```sql -select __endTime,count(*) from root.** group by session(1d) +select __endTime,count(*) from root.** group by session(1d); ``` 得到如下结果: -``` +```shell +-----------------------------+-----------------------------+------------------------------------+---------------------------------+-------------------------------+ | Time| __endTime|count(root.ln.wf02.wt01.temperature)|count(root.ln.wf02.wt01.hardware)|count(root.ln.wf02.wt01.status)| +-----------------------------+-----------------------------+------------------------------------+---------------------------------+-------------------------------+ @@ -1354,10 +1372,10 @@ select __endTime,count(*) from root.** group by session(1d) ``` 也可以和`HAVING`、`ALIGN BY DEVICE`共同使用 ```sql -select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device +select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device; ``` 得到如下结果,其中排除了`sum(hardware)`为0的部分 -``` +```shell +-----------------------------+-----------------+-----------------------------+-------------+ | Time| Device| __endTime|sum(hardware)| +-----------------------------+-----------------+-----------------------------+-------------+ @@ -1392,7 +1410,7 @@ group by count(controlExpression, size[,ignoreNull=true/false]) 4. 
当一个分组内最终的点数不满足`size`的数量时,不会输出该分组的结果 对于下面的原始数据,给出几个查询样例。 -``` +```shell +-----------------------------+-----------+-----------------------+ | Time|root.sg.soc|root.sg.charging_status| +-----------------------------+-----------+-----------------------+ @@ -1410,10 +1428,10 @@ group by count(controlExpression, size[,ignoreNull=true/false]) ``` sql语句如下 ```sql -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5) +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5); ``` 得到如下结果,其中由于第二个1970-01-01T08:00:00.006+08:00到1970-01-01T08:00:00.010+08:00的窗口中包含四个点,不符合`size = 5`的条件,因此不被输出 -``` +```shell +-----------------------------+-----------------------------+--------------------------------------+ | Time| __endTime|first_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------+--------------------------------------+ @@ -1422,10 +1440,10 @@ select count(charging_stauts), first_value(soc) from root.sg group by count(char ``` 而当使用ignoreNull将null值也考虑进来时,可以得到两个点计数为5的窗口,sql如下 ```sql -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false) +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false); ``` 得到如下结果 -``` +```shell +-----------------------------+-----------------------------+--------------------------------------+ | Time| __endTime|first_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------+--------------------------------------+ @@ -1445,12 +1463,12 @@ select count(charging_stauts), first_value(soc) from root.sg group by count(char **示例1:** 不同 database 下均存在名为 status 的序列, 如 "root.ln.wf01.wt01.status", "root.ln.wf02.wt02.status", 以及 "root.sgcc.wf03.wt01.status", 如果需要统计不同 database 下 status 序列的数据点个数,使用以下查询: ```sql -select count(status) from root.** group by level = 1 +select count(status) from root.** group by
level = 1; ``` 运行结果为: -``` +```shell +-------------------------+---------------------------+ |count(root.ln.*.*.status)|count(root.sgcc.*.*.status)| +-------------------------+---------------------------+ @@ -1463,12 +1481,12 @@ It costs 0.003s **示例2:** 统计不同设备下 status 序列的数据点个数,可以规定 level = 3, ```sql -select count(status) from root.** group by level = 3 +select count(status) from root.** group by level = 3; ``` 运行结果为: -``` +```shell +---------------------------+---------------------------+ |count(root.*.*.wt01.status)|count(root.*.*.wt02.status)| +---------------------------+---------------------------+ @@ -1483,12 +1501,12 @@ It costs 0.003s **示例3:** 统计不同 database 下的不同设备中 status 序列的数据点个数,可以使用以下查询: ```sql -select count(status) from root.** group by level = 1, 3 +select count(status) from root.** group by level = 1, 3; ``` 运行结果为: -``` +```shell +----------------------------+----------------------------+------------------------------+ |count(root.ln.*.wt01.status)|count(root.ln.*.wt02.status)|count(root.sgcc.*.wt01.status)| +----------------------------+----------------------------+------------------------------+ @@ -1501,12 +1519,12 @@ It costs 0.003s **示例4:** 查询所有序列下温度传感器 temperature 的最大值,可以使用下列查询语句: ```sql -select max_value(temperature) from root.** group by level = 0 +select max_value(temperature) from root.** group by level = 0; ``` 运行结果: -``` +```shell +---------------------------------+ |max_value(root.*.*.*.temperature)| +---------------------------------+ @@ -1519,12 +1537,12 @@ It costs 0.013s **示例5:** 上面的查询都是针对某一个传感器,特别地,**如果想要查询某一层级下所有传感器拥有的总数据点数,则需要显式规定测点为 `*`** ```sql -select count(*) from root.ln.** group by level = 2 +select count(*) from root.ln.** group by level = 2; ``` 运行结果: -``` +```shell +----------------------+----------------------+ |count(root.*.wf01.*.*)|count(root.*.wf02.*.*)| +----------------------+----------------------+ @@ -1548,7 +1566,7 @@ select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017 结果: -``` 
+```shell +-----------------------------+-------------------------+ | Time|COUNT(root.ln.*.*.status)| +-----------------------------+-------------------------+ @@ -1570,7 +1588,7 @@ It costs 0.006s select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d), level=1; ``` -``` +```shell +-----------------------------+-------------------------+ | Time|COUNT(root.ln.*.*.status)| +-----------------------------+-------------------------+ @@ -1661,7 +1679,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY TAGS(city); 该查询会将具有同一个 `city` 标签值的时间序列的所有满足查询条件的点做平均值计算,计算结果如下 -``` +```shell +--------+------------------+ | city| avg(temperature)| +--------+------------------+ @@ -1692,7 +1710,7 @@ SELECT avg(temperature) FROM root.factory1.** GROUP BY TAGS(city, workshop); 查询结果如下 -``` +```shell +--------+--------+------------------+ | city|workshop| avg(temperature)| +--------+--------+------------------+ @@ -1722,7 +1740,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 查询结果如下 -``` +```shell +-----------------------------+--------+--------+------------------+ | Time| city|workshop| avg(temperature)| +-----------------------------+--------+--------+------------------+ @@ -1762,16 +1780,16 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 下列使用方式是不正确的: ```sql - select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 - select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 + select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; + select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; ``` 2. 
对`GROUP BY LEVEL`结果进行过滤时,`SELECT`和`HAVING`中出现的PATH只能有一级。 下列使用方式是不正确的: ```sql - select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 - select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 + select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; + select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; ``` **SQL 示例:** @@ -1780,7 +1798,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 对于以下聚合结果进行过滤: - ``` + ```shell +-----------------------------+---------------------+---------------------+ | Time|count(root.test.*.s1)|count(root.test.*.s2)| +-----------------------------+---------------------+---------------------+ @@ -1798,7 +1816,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 执行结果如下: - ``` + ```shell +-----------------------------+---------------------+ | Time|count(root.test.*.s1)| +-----------------------------+---------------------+ @@ -1811,7 +1829,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS - **示例 2:** 对于以下聚合结果进行过滤: - ``` + ```shell +-----------------------------+-------------+---------+---------+ | Time| Device|count(s1)|count(s2)| +-----------------------------+-------------+---------+---------+ @@ -1834,7 +1852,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 执行结果如下: - ``` + ```shell +-----------------------------+-------------+---------+---------+ | Time| Device|count(s1)|count(s2)| +-----------------------------+-------------+---------+---------+ @@ -1899,7 +1917,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: 查询结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| 
+-----------------------------+-------------------------------+--------------------------+ @@ -1928,7 +1946,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `PREVIOUS` 填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| +-----------------------------+-------------------------------+--------------------------+ @@ -1950,9 +1968,9 @@ Total line number = 4 例如,原始数据如下所示: ```sql -select s1 from root.db.d1 -``` +select s1 from root.db.d1; ``` +```shell +-----------------------------+-------------+ | Time|root.db.d1.s1| +-----------------------------+-------------+ @@ -1969,9 +1987,9 @@ select s1 from root.db.d1 ```sql select avg(s1) from root.db.d1 - group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m) -``` + group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m); ``` +```shell +-----------------------------+------------------+ | Time|avg(root.db.d1.s1)| +-----------------------------+------------------+ @@ -2005,7 +2023,7 @@ select avg(s1) group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m) FILL(PREVIOUS); ``` -``` +```shell +-----------------------------+------------------+ | Time|avg(root.db.d1.s1)| +-----------------------------+------------------+ @@ -2039,7 +2057,7 @@ from root.db.d1 group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m) FILL(PREVIOUS, 2m); ``` -``` +```shell +-----------------------------+------------------+ | Time|avg(root.db.d1.s1)| +-----------------------------+------------------+ @@ -2082,7 +2100,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `LINEAR` 填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| 
+-----------------------------+-------------------------------+--------------------------+ @@ -2120,7 +2138,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `FLOAT` 类型的常量填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| +-----------------------------+-------------------------------+--------------------------+ @@ -2143,7 +2161,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `BOOLEAN` 类型的常量填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| +-----------------------------+-------------------------------+--------------------------+ @@ -2182,7 +2200,7 @@ Total line number = 4 SQL 语句: ```sql -select status, temperature from root.ln.wf01.wt01 limit 10 +select status, temperature from root.ln.wf01.wt01 limit 10; ``` 含义: @@ -2191,7 +2209,7 @@ select status, temperature from root.ln.wf01.wt01 limit 10 结果如下所示: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -2215,7 +2233,7 @@ It costs 0.000s SQL 语句: ```sql -select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 +select status, temperature from root.ln.wf01.wt01 limit 5 offset 3; ``` 含义: @@ -2224,7 +2242,7 @@ select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 结果如下所示: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -2243,7 +2261,7 @@ It costs 0.342s SQL 语句: 
```sql -select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:00.000 and time< 2024-07-12T00:12:00.000 limit 5 offset 3 +select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:00.000 and time< 2024-07-12T00:12:00.000 limit 5 offset 3; ``` 含义: @@ -2252,7 +2270,7 @@ select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:0 结果如下所示: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -2264,14 +2282,14 @@ select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:0 +-----------------------------+------------------------+-----------------------------+ Total line number = 5 It costs 0.070s -`` +``` - **示例 4:** `LIMIT` 子句与 `GROUP BY` 子句组合 SQL 语句: ```sql -select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3 +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3; ``` 含义: @@ -2280,7 +2298,7 @@ SQL 语句子句要求返回查询结果的第 3 至 6 行(第一行编号为 结果如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -2310,7 +2328,7 @@ It costs 0.016s SQL 语句: ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1; ``` 含义: @@ -2319,7 +2337,7 @@ select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and
time < 结果如下所示: -``` +```shell +-----------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.temperature| +-----------------------------+-----------------------------+ @@ -2339,7 +2357,7 @@ It costs 0.000s SQL 语句: ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1; ``` 含义: @@ -2348,7 +2366,7 @@ select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 结果如下所示: -``` +```shell +-----------------------------+------------------------+ | Time|root.ln.wf01.wt01.status| +-----------------------------+------------------------+ @@ -2368,12 +2386,12 @@ It costs 0.003s SQL 语句: ```sql -select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1 +select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1; ``` 含义: -``` +```shell +-----------------------------+-----------------------------------+ | Time|max_value(root.ln.wf01.wt01.status)| +-----------------------------+-----------------------------------+ @@ -2394,7 +2412,7 @@ It costs 0.000s SQL 语句: ```sql -select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 +select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0; ``` 含义: @@ -2403,7 +2421,7 @@ select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 结果如下所示: -``` +```shell +-----------------------------+-----------------------------+------------------------+ | Time|root.ln.wf01.wt01.temperature|root.ln.wf01.wt01.status| +-----------------------------+-----------------------------+------------------------+ @@ -2431,7 +2449,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time desc; ``` 执行结果: -``` +```shell 
+-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ | Time|root.ln.wf02.wt02.hardware|root.ln.wf02.wt02.status|root.ln.wf01.wt01.temperature|root.ln.wf01.wt01.status| +-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ @@ -2463,7 +2481,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc, ``` 执行结果: -``` +```shell +-----------------------------+-----------------+--------+------+-----------+ | Time| Device|hardware|status|temperature| +-----------------------------+-----------------+--------+------+-----------+ @@ -2481,7 +2499,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc, select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,device desc align by device; ``` 执行结果: -``` +```shell +-----------------------------+-----------------+--------+------+-----------+ | Time| Device|hardware|status|temperature| +-----------------------------+-----------------+--------+------+-----------+ @@ -2498,7 +2516,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,dev select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` 结果如图所示,可以看出,`ORDER BY DEVICE ASC,TIME ASC`就是默认情况下的排序方式,由于`ASC`是默认排序顺序,此处可以省略。 -``` +```shell +-----------------------------+-----------------+--------+------+-----------+ | Time| Device|hardware|status|temperature| +-----------------------------+-----------------+--------+------+-----------+ @@ -2512,10 +2530,10 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` 同样,可以在聚合查询中使用`ALIGN BY DEVICE`和`ORDER BY`子句,对聚合后的结果进行排序,示例代码如下所示: ```sql -select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select count(*) from root.ln.** 
group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` 执行结果: -``` +```shell +-----------------------------+-----------------+---------------+-------------+------------------+ | Time| Device|count(hardware)|count(status)|count(temperature)| +-----------------------------+-----------------+---------------+-------------+------------------+ @@ -2534,7 +2552,7 @@ select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11 排序在通过`ASC`,`DESC`指定排序顺序的同时,可以通过`NULLS`语法来指定NULL值在排序中的优先级,`NULLS FIRST`默认NULL值在结果集的最上方,`NULLS LAST`则保证NULL值在结果集的最后。如果没有在子句中指定,则默认顺序为`ASC`,`NULLS LAST`。 对于如下的数据,将给出几个任意表达式的查询示例供参考: -``` +```shell +-----------------------------+-------------+-------+-------+--------+-------+ | Time| Device| base| score| bonus| total| +-----------------------------+-------------+-------+-------+--------+-------+ @@ -2555,11 +2573,11 @@ select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11 当需要根据基础分数score对结果进行排序时,可以直接使用 ```Sql -select score from root.** order by score desc align by device +select score from root.** order by score desc align by device; ``` 会得到如下结果 -``` +```shell +-----------------------------+---------+-----+ | Time| Device|score| +-----------------------------+---------+-----+ @@ -2578,15 +2596,15 @@ select score from root.** order by score desc align by device 当想要根据总分对结果进行排序,可以在order by子句中使用表达式进行计算 ```Sql -select score,total from root.one order by base+score+bonus desc +select score,total from root.one order by base+score+bonus desc; ``` 该sql等价于 ```Sql -select score,total from root.one order by total desc +select score,total from root.one order by total desc; ``` 得到如下结果 -``` +```shell +-----------------------------+--------------+--------------+ | Time|root.one.score|root.one.total| +-----------------------------+--------------+--------------+ @@ -2601,10 +2619,10 @@ select score,total from root.one order by total desc select base, score, 
bonus, total from root.** order by total desc NULLS Last, score desc NULLS Last, bonus desc NULLS Last, - time desc align by device + time desc align by device; ``` 得到如下结果 -``` +```shell +-----------------------------+----------+----+-----+-----+-----+ | Time| Device|base|score|bonus|total| +-----------------------------+----------+----+-----+-----+-----+ @@ -2625,10 +2643,10 @@ select base, score, bonus, total from root.** order by total desc NULLS Last, ``` 在order by中同样可以使用聚合查询表达式 ```Sql -select min_value(total) from root.** order by min_value(total) asc align by device +select min_value(total) from root.** order by min_value(total) asc align by device; ``` 得到如下结果 -``` +```shell +----------+----------------+ | Device|min_value(total)| +----------+----------------+ @@ -2641,11 +2659,11 @@ select min_value(total) from root.** order by min_value(total) asc align by devi ``` 当在查询中指定多列,未被排序的列会随着行和排序列一起改变顺序,当排序列相同时行的顺序和具体实现有关(没有固定顺序) ```Sql -select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device +select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device; ``` 得到结果如下 -· -``` + +```shell +----------+----------------+---------------+ | Device|min_value(total)|max_value(base)| +----------+----------------+---------------+ @@ -2659,10 +2677,10 @@ select min_value(total),max_value(base) from root.** order by max_value(total) d Order by device, time可以和order by expression共同使用 ```Sql -select score from root.** order by device asc, score desc, time asc align by device +select score from root.** order by device asc, score desc, time asc align by device; ``` 会得到如下结果 -``` +```shell +-----------------------------+---------+-----+ | Time| Device|score| +-----------------------------+---------+-----+ @@ -2705,7 +2723,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; 执行如下: -``` +```shell +-----------------------------+-----------------+-----------+------+--------+ | 
Time| Device|temperature|status|hardware| +-----------------------------+-----------------+-----------+------+--------+ @@ -2772,8 +2790,10 @@ intoItem 下面通过示例进一步说明: - **示例 1**(按时间对齐) +```sql + select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +``` ```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +--------------+-------------------+--------+ | source column| target timeseries| written| +--------------+-------------------+--------+ @@ -2799,8 +2819,10 @@ It costs 0.725s > - `written` 表示预期写入的数据量。 - **示例 2**(按时间对齐) +```sql + select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +``` ```shell -IoTDB> select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +--------------------------------------+-------------------------+--------+ | source column| target timeseries| written| +--------------------------------------+-------------------------+--------+ @@ -2815,8 +2837,10 @@ It costs 0.375s 该语句将聚合查询的结果存储到指定序列中。 - **示例 3**(按设备对齐) +```sql + select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +``` ```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +--------------+--------------+-------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+-------------------+--------+ @@ -2837,8 +2861,10 @@ It costs 0.625s > 按设备对齐查询时,`CLI` 展示的结果集多出一列 `source device` 列表示查询的设备。 - **示例 4**(按设备对齐) +```sql + select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +``` ```shell -IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), 
root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +--------------+--------------+------------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+------------------------+--------+ @@ -2975,7 +3001,7 @@ select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from #### 其他要注意的点 - 对于一般的聚合查询,时间戳是无意义的,约定使用 0 来存储。 -- 当目标序列存在时,需要保证源序列和目标时间序列的数据类型兼容。关于数据类型的兼容性,查看文档 [数据类型](../Background-knowledge/Data-Type_timecho.md#数据类型兼容性)。 +- 当目标序列存在时,需要保证源序列和目标时间序列的数据类型兼容。关于数据类型的兼容性,查看文档 [数据类型](../Background-knowledge/Data-Type.md#数据类型兼容性)。 - 当目标序列不存在时,系统将自动创建目标序列(包括 database)。 - 当查询的序列不存在或查询的序列不存在数据,则不会自动创建目标序列。 @@ -2983,8 +3009,10 @@ select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from #### 实现 IoTDB 内部 ETL 对原始数据进行 ETL 处理后写入新序列。 +```sql +SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEVICE; +``` ```shell -IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEIVCE; +--------------+-------------------+---------------------------+--------+ | source device| source column| target timeseries| written| +--------------+-------------------+---------------------------+--------+ @@ -3001,8 +3029,10 @@ IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) #### 查询结果存储 将查询结果进行持久化存储,起到类似物化视图的作用。 +```sql +SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +``` ```shell -IOTDB > SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +--------------------------+-----------------------------+--------+ | source column| target timeseries| written| +--------------------------+-----------------------------+--------+ @@ -3020,8 +3050,10 @@ It costs 0.115s **注意:** 建议配合使用 `LIMIT & OFFSET` 子句或 `WHERE`
子句(时间过滤条件)对数据进行分批,防止单次操作的数据量过大。 +```sql +SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +``` ```shell -IOTDB > SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +--------------------------+----------------------+--------+ | source column| target timeseries| written| +--------------------------+----------------------+--------+ diff --git a/src/zh/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_apache.md b/src/zh/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_apache.md index 98a2bde25..cb4b7f3ee 100644 --- a/src/zh/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_apache.md +++ b/src/zh/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_apache.md @@ -7,32 +7,32 @@ #### 创建数据库 ```sql -CREATE DATABASE root.ln +CREATE DATABASE root.ln; ``` #### 查看数据库 ```sql -show databases -show databases root.* -show databases root.** +show databases; +show databases root.*; +show databases root.**; ``` #### 删除数据库 ```sql -DELETE DATABASE root.ln -DELETE DATABASE root.sgcc -DELETE DATABASE root.** +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +DELETE DATABASE root.**; ``` #### 统计数据库数量 ```sql -count databases -count databases root.* -count databases root.sgcc.* -count databases root.sgcc +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` ### 1.2 时间序列管理 @@ -40,161 +40,161 @@ count databases root.sgcc #### 创建时间序列 ```sql -create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries 
root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` - 简化版 ```sql -create timeseries root.ln.wf01.wt01.status BOOLEAN -create timeseries root.ln.wf01.wt01.temperature FLOAT -create timeseries root.ln.wf02.wt02.hardware TEXT -create timeseries root.ln.wf02.wt02.status BOOLEAN -create timeseries root.sgcc.wf03.wt01.status BOOLEAN -create timeseries root.sgcc.wf03.wt01.temperature FLOAT +create timeseries root.ln.wf01.wt01.status BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature FLOAT; +create timeseries root.ln.wf02.wt02.hardware TEXT; +create timeseries root.ln.wf02.wt02.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature FLOAT; ``` - 错误提示 ```sql -create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN -> error: encoding TS_2DIFF does not support BOOLEAN +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN; +> error: encoding TS_2DIFF does not support BOOLEAN; ``` #### 创建对齐时间序列 ```sql -CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ``` #### 删除时间序列 ```sql -delete timeseries root.ln.wf01.wt01.status -delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -delete timeseries root.ln.wf02.* -drop timeseries root.ln.wf02.* +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` #### 查看时间序列 ```sql -SHOW TIMESERIES -SHOW TIMESERIES -SHOW TIMESERIES root.** -SHOW TIMESERIES root.ln.** -SHOW TIMESERIES root.ln.** limit 10 offset 10 -SHOW 
TIMESERIES root.ln.** where timeseries contains 'wf01.wt' -SHOW TIMESERIES root.ln.** where dataType=FLOAT +SHOW TIMESERIES; +SHOW TIMESERIES ; +SHOW TIMESERIES root.**; +SHOW TIMESERIES root.ln.**; +SHOW TIMESERIES root.ln.** limit 10 offset 10; +SHOW TIMESERIES root.ln.** where timeseries contains 'wf01.wt'; +SHOW TIMESERIES root.ln.** where dataType=FLOAT; SHOW TIMESERIES root.ln.** where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -SHOW LATEST TIMESERIES +SHOW LATEST TIMESERIES; ``` #### 统计时间序列数量 ```sql -COUNT TIMESERIES root.** -COUNT TIMESERIES root.ln.** -COUNT TIMESERIES root.ln.*.*.status -COUNT TIMESERIES root.ln.wf01.wt01.status -COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc'; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c'; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c'; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; COUNT TIMESERIES root.** WHERE time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -COUNT TIMESERIES root.** GROUP BY LEVEL=1 -COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` #### 标签点管理 ```sql -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) 
attributes(attr1=v1, attr2=v2); ``` - 重命名标签或属性 ```sql -ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1; ``` - 重新设置标签或属性的值 ```sql -ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1; ``` - 删除已经存在的标签或属性 ```sql -ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2; ``` - 添加新的标签 ```sql -ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4; ``` - 添加新的属性 ```sql -ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4; ``` - 更新插入别名,标签和属性 ```sql -ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4) +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4); ``` - 使用标签作为过滤条件查询时间序列 ```sql -SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause +SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; ``` 返回给定路径的下的所有满足条件的时间序列信息: ```sql -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` - 使用标签作为过滤条件统计时间序列数量 ```sql -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? 
timeseriesWhereClause GROUP BY LEVEL=; ``` 返回给定路径的下的所有满足条件的时间序列的数量: ```sql -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` 创建对齐时间序列: ```sql -create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) +create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)); ``` 支持查询: ```sql -show timeseries where TAGS(tag1)='v1' +show timeseries where TAGS(tag1)='v1'; ``` ### 1.3 时间序列路径管理 @@ -202,54 +202,48 @@ show timeseries where TAGS(tag1)='v1' #### 查看路径的所有子路径 ```sql -SHOW CHILD PATHS pathPattern -- 查询 root.ln 的下一层:show child paths root.ln -- 查询形如 root.xx.xx.xx 的路径:show child paths root.*.* +SHOW CHILD PATHS pathPattern; +- 查询 root.ln 的下一层; +show child paths root.ln; +- 查询形如 root.xx.xx.xx 的路径; +show child paths root.*.*; ``` #### 查看路径的所有子节点 ```sql -SHOW CHILD NODES pathPattern - -- 查询 root 的下一层:show child nodes root -- 查询 root.ln 的下一层 :show child nodes root.ln +SHOW CHILD NODES pathPattern; +- 查询 root 的下一层; +show child nodes root; +- 查询 root.ln 的下一层; +show child nodes root.ln; ``` #### 查看设备 ```sql -IoTDB> show devices - -IoTDB> show devices root.ln.** - -IoTDB> show devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; +show devices; +show devices root.ln.**; +show devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; ``` ##### 查看设备及其 database 信息 ```sql -IoTDB> show devices with database - -IoTDB> show devices root.ln.** with database +show devices with database; +show devices root.ln.** with database; ``` #### 统计节点数 ```sql -IoTDB > COUNT NODES root.** LEVEL=2 - -IoTDB > COUNT NODES root.ln.** LEVEL=2 - -IoTDB > 
COUNT NODES root.ln.wf01.* LEVEL=3 - -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.* LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` #### 统计设备数量 ```sql - -IoTDB> count devices - -IoTDB> count devices root.ln.** - -IoTDB> count devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; +count devices; +count devices root.ln.**; +count devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; ``` ### 1.4 设备模板管理 @@ -262,136 +256,106 @@ IoTDB> count devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26: #### 创建设备模板 ```sql -CREATE DEVICE TEMPLATE ALIGNED? '(' [',' ]+ ')' +CREATE DEVICE TEMPLATE ALIGNED? '(' [',' ]+ ')'; ``` 创建包含两个非对齐序列的设备模板 ```sql -IoTDB> create device template t1 (temperature FLOAT, status BOOLEAN) +create device template t1 (temperature FLOAT, status BOOLEAN); ``` 创建包含一组对齐序列的设备模板 ```sql -IoTDB> create device template t2 aligned (lat FLOAT, lon FLOAT) +create device template t2 aligned (lat FLOAT, lon FLOAT); ``` #### 挂载设备模板 ```sql -IoTDB> set DEVICE TEMPLATE t1 to root.sg1 +set DEVICE TEMPLATE t1 to root.sg1; ``` #### 激活设备模板 ```sql -IoTDB> create timeseries using DEVICE TEMPLATE on root.sg1.d1 - -IoTDB> set DEVICE TEMPLATE t1 to root.sg1.d1 - -IoTDB> set DEVICE TEMPLATE t2 to root.sg1.d2 - -IoTDB> create timeseries using device template on root.sg1.d1 - -IoTDB> create timeseries using device template on root.sg1.d2 +create timeseries using DEVICE TEMPLATE on root.sg1.d1; +set DEVICE TEMPLATE t1 to root.sg1.d1; +set DEVICE TEMPLATE t2 to root.sg1.d2; +create timeseries using device template on root.sg1.d1; +create timeseries using device template on root.sg1.d2; ``` #### 查看设备模板 ```sql -IoTDB> show device templates +show device templates; ``` - 查看某个设备模板下的物理量 ```sql -IoTDB> show nodes in device template t1 +show nodes in device template t1; ``` - 查看挂载了某个设备模板的路径 ```sql -IoTDB> show paths set device template t1 +show 
paths set device template t1; ``` - 查看使用了某个设备模板的路径(即模板在该路径上已激活,序列已创建) ```sql -IoTDB> show paths using device template t1 +show paths using device template t1; ``` #### 解除设备模板 ```sql -IoTDB> delete timeseries of device template t1 from root.sg1.d1 -``` -```sql -IoTDB> deactivate device template t1 from root.sg1.d1 +delete timeseries of device template t1 from root.sg1.d1; +deactivate device template t1 from root.sg1.d1; ``` 批量处理 ```sql -IoTDB> delete timeseries of device template t1 from root.sg1.*, root.sg2.* -``` -```sql -IoTDB> deactivate device template t1 from root.sg1.*, root.sg2.* +delete timeseries of device template t1 from root.sg1.*, root.sg2.*; +deactivate device template t1 from root.sg1.*, root.sg2.*; ``` #### 卸载设备模板 ```sql -IoTDB> unset device template t1 from root.sg1.d1 +unset device template t1 from root.sg1.d1; ``` #### 删除设备模板 ```sql -IoTDB> drop device template t1 +drop device template t1; ``` ### 1.5 数据存活时间管理 #### 设置 TTL ```sql -IoTDB> set ttl to root.ln 3600000 -``` -```sql -IoTDB> set ttl to root.sgcc.** 3600000 -``` -```sql -IoTDB> set ttl to root.** 3600000 +set ttl to root.ln 3600000; +set ttl to root.sgcc.** 3600000; +set ttl to root.** 3600000; ``` #### 取消 TTL ```sql -IoTDB> unset ttl from root.ln -``` -```sql -IoTDB> unset ttl from root.sgcc.** -``` -```sql -IoTDB> unset ttl from root.** +unset ttl from root.ln; +unset ttl from root.sgcc.**; +unset ttl from root.**; ``` #### 显示 TTL ```sql -IoTDB> SHOW ALL TTL -``` -```sql -IoTDB> SHOW TTL ON pathPattern -``` -```sql -IoTDB> show DEVICES +SHOW ALL TTL; +SHOW TTL ON pathPattern; +show DEVICES; ``` ## 2. 
写入数据 ### 2.1 写入单列数据 ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp,status) values(1,true) -``` -```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1'),(2, 'v1') +insert into root.ln.wf02.wt02(timestamp,status) values(1,true); +insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1'),(2, 'v1'); ``` ### 2.2 写入多列数据 ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) values (2, false, 'v2') -``` -```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4') +insert into root.ln.wf02.wt02(timestamp, status, hardware) values (2, false, 'v2'); +insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4'); ``` ### 2.3 使用服务器时间戳 ```sql -IoTDB > insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2') +insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2'); ``` ### 2.4 写入对齐时间序列数据 ```sql -IoTDB > create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE) -``` -```sql -IoTDB > insert into root.sg1.d1(timestamp, s1, s2) aligned values(1, 1, 1) -``` -```sql -IoTDB > insert into root.sg1.d1(timestamp, s1, s2) aligned values(2, 2, 2), (3, 3, 3) -``` -```sql -IoTDB > select * from root.sg1.d1 +create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE); +insert into root.sg1.d1(timestamp, s1, s2) aligned values(1, 1, 1); +insert into root.sg1.d1(timestamp, s1, s2) aligned values(2, 2, 2), (3, 3, 3); +select * from root.sg1.d1; ``` ### 2.5 加载 TsFile 文件数据 @@ -417,43 +381,24 @@ load '' [sglevel=int][onSuccess=delete/none] ### 3.1 删除单列数据 ```sql delete from root.ln.wf02.wt02.status where time<=2017-11-01T16:26:00; -``` -```sql delete from root.ln.wf02.wt02.status where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -``` -```sql -delete from root.ln.wf02.wt02.status where time < 10 -``` -```sql -delete from root.ln.wf02.wt02.status where time <= 10 -``` -```sql -delete from root.ln.wf02.wt02.status 
where time < 20 and time > 10 -``` -```sql -delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10 -``` -```sql -delete from root.ln.wf02.wt02.status where time > 20 -``` -```sql -delete from root.ln.wf02.wt02.status where time >= 20 -``` -```sql -delete from root.ln.wf02.wt02.status where time = 20 +delete from root.ln.wf02.wt02.status where time < 10; +delete from root.ln.wf02.wt02.status where time <= 10; +delete from root.ln.wf02.wt02.status where time < 20 and time > 10; +delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10; +delete from root.ln.wf02.wt02.status where time > 20; +delete from root.ln.wf02.wt02.status where time >= 20; +delete from root.ln.wf02.wt02.status where time = 20; ``` 出错: ```sql -delete from root.ln.wf02.wt02.status where time > 4 or time < 0 - -Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic - -expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' +delete from root.ln.wf02.wt02.status where time > 4 or time < 0; +Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' ``` 删除时间序列中的所有数据: ```sql -delete from root.ln.wf02.wt02.status +delete from root.ln.wf02.wt02.status; ``` ### 3.2 删除多列数据 ```sql @@ -461,8 +406,7 @@ delete from root.ln.wf02.wt02.* where time <= 2017-11-01T16:26:00; ``` 声明式的编程方式: ```sql -IoTDB> delete from root.ln.wf03.wt02.status where time < now() - +delete from root.ln.wf03.wt02.status where time < now(); Msg: The statement is executed successfully. ``` ## 4. 数据查询 @@ -471,7 +415,7 @@ Msg: The statement is executed successfully. 
#### 时间过滤查询 ```sql -select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 +select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000; ``` #### 根据一个时间区间选择多列数据 ```sql @@ -502,9 +446,7 @@ select s1 as temperature, s2 as speed from root.ln.wf01.wt01; 不支持: ```sql select s1, count(s1) from root.sg.d1; - select sin(s1), count(s1) from root.sg.d1; - select s1, count(s1) from root.sg.d1 group by ([10,100),10ms); ``` ##### 时间序列查询嵌套表达式 @@ -512,67 +454,50 @@ select s1, count(s1) from root.sg.d1 group by ([10,100),10ms); 示例 1: ```sql select a, - -​ b, - -​ ((a + 1) * 2 - 1) % 2 + 1.5, - -​ sin(a + sin(a + sin(b))), - -​ -(a + b) * (sin(a + b) * sin(a + b) + cos(a + b) * cos(a + b)) + 1 - + b, + ((a + 1) * 2 - 1) % 2 + 1.5, + sin(a + sin(a + sin(b))), + -(a + b) * (sin(a + b) * sin(a + b) + cos(a + b) * cos(a + b)) + 1 from root.sg1; ``` 示例 2: ```sql -select (a + b) * 2 + sin(a) from root.sg +select (a + b) * 2 + sin(a) from root.sg; +``` 示例 3: - -select (a + *) / 2 from root.sg1 - +```sql +select (a + *) / 2 from root.sg1; +``` 示例 4: - -select (a + b) * 3 from root.sg, root.ln +```sql +select (a + b) * 3 from root.sg, root.ln; ``` ##### 聚合查询嵌套表达式 示例 1: ```sql select avg(temperature), - -​ sin(avg(temperature)), - -​ avg(temperature) + 1, - -​ -sum(hardware), - -​ avg(temperature) + sum(hardware) - + sin(avg(temperature)), + avg(temperature) + 1, + -sum(hardware), + avg(temperature) + sum(hardware) from root.ln.wf01.wt01; ``` 示例 2: ```sql select avg(*), - -​ (avg(*) + 1) * 3 / 2 -1 - -from root.sg1 + (avg(*) + 1) * 3 / 2 -1 +from root.sg1; ``` 示例 3: ```sql select avg(temperature), - -​ sin(avg(temperature)), - -​ avg(temperature) + 1, - -​ -sum(hardware), - -​ avg(temperature) + sum(hardware) as custom_sum - + sin(avg(temperature)), + avg(temperature) + 1, + -sum(hardware), + avg(temperature) + sum(hardware) as custom_sum from root.ln.wf01.wt01 - GROUP BY([10, 90), 10ms); ``` #### 最新点查询 @@ -585,15 +510,15 @@ select last [COMMA ]* 
from < PrefixPath > [COMMA < PrefixPath >]* < 查询 root.ln.wf01.wt01.status 的最新数据点 ```sql -IoTDB> select last status from root.ln.wf01.wt01 +select last status from root.ln.wf01.wt01; ``` 查询 root.ln.wf01.wt01 下 status,temperature 时间戳大于等于 2017-11-07T23:50:00 的最新数据点 ```sql -IoTDB> select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; ``` 查询 root.ln.wf01.wt01 下所有序列的最新数据点,并按照序列名降序排列 ```sql -IoTDB> select last * from root.ln.wf01.wt01 order by timeseries desc; +select last * from root.ln.wf01.wt01 order by timeseries desc; ``` ### 4.3 查询过滤条件 @@ -648,20 +573,20 @@ select code from root.sg1.d1 where temperature is not null; 查询 `root.sg.d1` 下 `value` 含有`'cc'`的数据 ```sql -IoTDB> select * from root.sg.d1 where value like '%cc%' +select * from root.sg.d1 where value like '%cc%'; ``` 查询 `root.sg.d1` 下 `value` 中间为 `'b'`、前后为任意单个字符的数据 ```sql -IoTDB> select * from root.sg.device where value like '_b_' +select * from root.sg.device where value like '_b_'; ``` 查询 root.sg.d1 下 value 值为26个英文字符组成的字符串 ```sql -IoTDB> select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; ``` 查询 root.sg.d1 下 value 值为26个小写英文字符组成的字符串且时间大于100的 ```sql -IoTDB> select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; ``` ### 4.4 分段分组聚合 @@ -704,23 +629,23 @@ select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017 统计不同 database 下 status 序列的数据点个数 ```sql -select count(status) from root.** group by level = 1 +select count(status) from root.** group by level = 1; ``` 统计不同设备下 status 序列的数据点个数 ```sql -select count(status) from root.** group by level = 3 +select count(status) from root.** group by level = 3; ``` 统计不同 database 下的不同设备中 status 序列的数据点个数 ```sql -select count(status) from root.** group by level = 1, 3 +select 
count(status) from root.** group by level = 1, 3; ``` 查询所有序列下温度传感器 temperature 的最大值 ```sql -select max_value(temperature) from root.** group by level = 0 +select max_value(temperature) from root.** group by level = 0; ``` 查询某一层级下所有传感器拥有的总数据点数 ```sql -select count(*) from root.ln.** group by level = 2 +select count(*) from root.ln.** group by level = 2; ``` #### 标签分组聚合 @@ -738,19 +663,19 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS ``` #### 差值分段聚合 ```sql -group by variation(controlExpression[,delta][,ignoreNull=true/false]) +group by variation(controlExpression[,delta][,ignoreNull=true/false]); ``` ##### delta=0时的等值事件分段 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6); ``` 指定ignoreNull为false ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false); ``` ##### delta!=0时的差值事件分段 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4); ``` #### 条件分段聚合 ```sql @@ -758,11 +683,11 @@ group by condition(predict,[keep>/>=/=/<=/<]threshold,[,ignoreNull=true/false]) ``` 查询至少连续两行以上的charging_status=1的数据 ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true); ``` 当设置`ignoreNull`为false时,遇到null值为将其视为一个不满足条件的行,得到结果原先的分组被含null的行拆分 ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false) +select 
max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false); ``` #### 会话分段聚合 ```sql @@ -770,38 +695,35 @@ group by session(timeInterval) ``` 按照不同的时间单位设定时间间隔 ```sql -select __endTime,count(*) from root.** group by session(1d) +select __endTime,count(*) from root.** group by session(1d); ``` 和`HAVING`、`ALIGN BY DEVICE`共同使用 ```sql -select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device +select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device; ``` #### 点数分段聚合 ```sql group by count(controlExpression, size[,ignoreNull=true/false]) ``` -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5) - +```sql +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5); +``` 当使用ignoreNull将null值也考虑进来 ```sql -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false) +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false); ``` ### 4.5 聚合结果过滤 不正确的: ```sql -select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 - -select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 - -select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 - -select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 +select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; +select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; +select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; +select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; ``` SQL 示例: ```sql select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 2; - select count(s1), count(s2) from root.**
group by ([1,11),2ms) having count(s2) > 1 align by device; ``` ### 4.6 结果集补空值 @@ -834,37 +756,37 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: 基本的 `LIMIT` 子句 ```sql -select status, temperature from root.ln.wf01.wt01 limit 10 +select status, temperature from root.ln.wf01.wt01 limit 10; ``` 带 `OFFSET` 的 `LIMIT` 子句 ```sql -select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 +select status, temperature from root.ln.wf01.wt01 limit 5 offset 3; ``` `LIMIT` 子句与 `WHERE` 子句结合 ```sql -select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 5 offset 3 +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 5 offset 3; ``` `LIMIT` 子句与 `GROUP BY` 子句组合 ```sql -select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3 +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3; ``` #### 按列分页 基本的 `SLIMIT` 子句 ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1; ``` 带 `SOFFSET` 的 `SLIMIT` 子句 ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1; ``` `SLIMIT` 子句与 `GROUP BY` 子句结合 ```sql -select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1 +select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1; ``` `SLIMIT` 子句与 `LIMIT` 子句结合 
```sql -select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 +select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0; ``` ### 4.8 排序 @@ -886,7 +808,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` 对聚合后的结果进行排序 ```sql -select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` ### 4.9 查询对齐模式 @@ -899,50 +821,36 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; #### 整体描述 ```sql selectIntoStatement - -​ : SELECT - -​ resultColumn [, resultColumn] ... - -​ INTO intoItem [, intoItem] ... - -​ FROM prefixPath [, prefixPath] ... - -​ [WHERE whereCondition] - -​ [GROUP BY groupByTimeClause, groupByLevelClause] - -​ [FILL ({PREVIOUS | LINEAR | constant} (, interval=DURATION_LITERAL)?)] - -​ [LIMIT rowLimit OFFSET rowOffset] - -​ [ALIGN BY DEVICE] - -​ ; - - + : SELECT + resultColumn [, resultColumn] ... + INTO intoItem [, intoItem] ... + FROM prefixPath [, prefixPath] ... 
+ [WHERE whereCondition] + [GROUP BY groupByTimeClause, groupByLevelClause] + [FILL ({PREVIOUS | LINEAR | constant} (, interval=DURATION_LITERAL)?)] + [LIMIT rowLimit OFFSET rowOffset] + [ALIGN BY DEVICE] + ; intoItem - -​ : [ALIGNED] intoDevicePath '(' intoMeasurementName [',' intoMeasurementName]* ')' - -​ ; + : [ALIGNED] intoDevicePath '(' intoMeasurementName [',' intoMeasurementName]* ')' + ; ``` 按时间对齐,将 `root.sg` database 下四条序列的查询结果写入到 `root.sg_copy` database 下指定的四条序列中 ```sql -IoTDB> select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; ``` 按时间对齐,将聚合查询的结果存储到指定序列中 ```sql -IoTDB> select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); ``` 按设备对齐 ```sql -IoTDB> select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; ``` 按设备对齐,将表达式计算的结果存储到指定序列中 ```sql -IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; ``` #### 使用变量占位符 @@ -950,21 +858,15 @@ IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) fr ###### 目标设备不使用变量占位符 & 目标物理量列表使用变量占位符 ```sql - select s1, s2 - into root.sg_copy.d1(::), root.sg_copy.d2(s1), root.sg_copy.d1(${3}), root.sg_copy.d2(::) - from root.sg.d1, root.sg.d2; ``` 该语句等价于: ```sql - select s1, s2 - into root.sg_copy.d1(s1), root.sg_copy.d2(s1), root.sg_copy.d1(s2), root.sg_copy.d2(s2) - 
from root.sg.d1, root.sg.d2; ``` @@ -972,9 +874,7 @@ from root.sg.d1, root.sg.d2; ```sql select d1.s1, d1.s2, d2.s3, d3.s4 - into ::(s1_1, s2_2), root.sg.d2_2(s3_3), root.${2}_copy.::(s4) - from root.sg; ``` @@ -988,47 +888,37 @@ select * into root.sg_bk.::(::) from root.sg.**; ###### 目标设备不使用变量占位符 & 目标物理量列表使用变量占位符 ```sql - select s1, s2, s3, s4 - into root.backup_sg.d1(s1, s2, s3, s4), root.backup_sg.d2(::), root.sg.d3(backup_${4}) - from root.sg.d1, root.sg.d2, root.sg.d3 - align by device; ``` ###### 目标设备使用变量占位符 & 目标物理量列表不使用变量占位符 ```sql - select avg(s1), sum(s2) + sum(s3), count(s4) - into root.agg_${2}.::(avg_s1, sum_s2_add_s3, count_s4) - from root.** - align by device; ``` ###### 目标设备使用变量占位符 & 目标物理量列表使用变量占位符 ```sql - select * into ::(backup_${4}) from root.sg.** align by device; ``` #### 指定目标序列为对齐序列 ```sql - select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; ``` ## 5. 运维语句 生成对应的查询计划 ```sql -explain select s1,s2 from root.sg.d1 +explain select s1,s2 from root.sg.d1; ``` 执行对应的查询语句,并获取分析结果 ```sql -explain analyze select s1,s2 from root.sg.d1 order by s1 +explain analyze select s1,s2 from root.sg.d1 order by s1; ``` ## 6. 运算符 @@ -1039,7 +929,7 @@ explain analyze select s1,s2 from root.sg.d1 order by s1 更多见文档 [Arithmetic Operators and Functions](./Operator-and-Expression.md#_1-1-算数运算符) ```sql -select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1 +select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1; ``` ### 6.2 比较运算符 @@ -1047,27 +937,27 @@ select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root 更多见文档[Comparison Operators and Functions](./Operator-and-Expression.md#_1-2-比较运算符) ```sql -# Basic comparison operators +-- Basic comparison operators; select a, b, a > 10, a <= b, !(a <= b), a > 10 && a > b from root.test; -# `BETWEEN ... AND ...` operator +-- `BETWEEN ... 
AND ...` operator; select temperature from root.sg1.d1 where temperature between 36.5 and 40; select temperature from root.sg1.d1 where temperature not between 36.5 and 40; -# Fuzzy matching operator: Use `Like` for fuzzy matching -select * from root.sg.d1 where value like '%cc%' -select * from root.sg.device where value like '_b_' +-- Fuzzy matching operator: Use `Like` for fuzzy matching; +select * from root.sg.d1 where value like '%cc%'; +select * from root.sg.device where value like '_b_'; -# Fuzzy matching operator: Use `Regexp` for fuzzy matching -select * from root.sg.d1 where value regexp '^[A-Za-z]+$' -select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +-- Fuzzy matching operator: Use `Regexp` for fuzzy matching; +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; select b, b like '1%', b regexp '[0-2]' from root.test; -# `IS NULL` operator +-- `IS NULL` operator; select code from root.sg1.d1 where temperature is null; select code from root.sg1.d1 where temperature is not null; -# `IN` operator +-- `IN` operator; select code from root.sg1.d1 where code in ('200', '300', '400', '500'); select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); select a, a in (1, 2) from root.test; @@ -1123,25 +1013,25 @@ select ts, in_range(ts, 'lower'='2', 'upper'='3.1') from root.test; ```sql select s1, string_contains(s1, 's'='warn') from root.sg1.d4; select s1, string_matches(s1, 'regex'='[^\\s]+37229') from root.sg1.d4; -select s1, length(s1) from root.sg1.d1 -select s1, locate(s1, "target"="1") from root.sg1.d1 -select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1 -select s1, startswith(s1, "target"="1") from root.sg1.d1 -select s1, endswith(s1, "target"="1") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", 
"series_behind"="true") from root.sg1.d1 -select s1, substring(s1 from 1 for 2) from root.sg1.d1 -select s1, replace(s1, 'es', 'tt') from root.sg1.d1 -select s1, upper(s1) from root.sg1.d1 -select s1, lower(s1) from root.sg1.d1 -select s3, trim(s3) from root.sg1.d1 -select s1, s2, strcmp(s1, s2) from root.sg1.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1 -select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1 -select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 +select s1, length(s1) from root.sg1.d1; +select s1, locate(s1, "target"="1") from root.sg1.d1; +select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1; +select s1, startswith(s1, "target"="1") from root.sg1.d1; +select s1, endswith(s1, "target"="1") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", "series_behind"="true") from root.sg1.d1; +select s1, substring(s1 from 1 for 2) from root.sg1.d1; +select s1, replace(s1, 'es', 'tt') from root.sg1.d1; +select s1, upper(s1) from root.sg1.d1; +select s1, lower(s1) from root.sg1.d1; +select s3, trim(s3) from root.sg1.d1; +select s1, s2, strcmp(s1, s2) from root.sg1.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1; +select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1; +select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1; +select 
regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1; +select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1; ``` ### 7.5 数据类型转换函数 @@ -1149,7 +1039,7 @@ select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 更多见文档[Data Type Conversion Function](./Operator-and-Expression.md#_2-5-数据类型转换函数) ```sql -SELECT cast(s1 as INT32) from root.sg +SELECT cast(s1 as INT32) from root.sg; ``` ### 7.6 常序列生成函数 @@ -1197,8 +1087,8 @@ select equal_size_bucket_agg_sample(temperature, 'type'='avg','proportion'='0.1' select equal_size_bucket_m4_sample(temperature, 'proportion'='0.1') as M4_sample from root.ln.wf01.wt01; select equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='avg', 'number'='2') as outlier_avg_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='stendis', 'number'='2') as outlier_stendis_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='cos', 'number'='2') as outlier_cos_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='prenextdis', 'number'='2') as outlier_prenextdis_sample from root.ln.wf01.wt01; -select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1 -select M4(s1,'windowSize'='10') from root.vehicle.d1 +select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1; +select M4(s1,'windowSize'='10') from root.vehicle.d1; ``` ### 7.11 时间序列处理函数 @@ -1206,7 +1096,7 @@ select M4(s1,'windowSize'='10') from root.vehicle.d1 更多见文档[Time-Series](./Operator-and-Expression.md#_2-11-时间序列处理函数) ```sql -select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1 +select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1; ``` ## 8. 
数据质量函数库 @@ -1218,24 +1108,24 @@ select change_points(s1), change_points(s2), change_points(s3), change_points(s4 更多见文档[Data-Quality](../SQL-Manual/UDF-Libraries.md#数据质量) ```sql -# Completeness -select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Completeness; +select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Consistency -select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Consistency; +select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Timeliness -select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Timeliness; +select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Validity -select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Validity; +select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Accuracy -select Accuracy(t1,t2,t3,m1,m2,m3) from root.test +# Accuracy; +select Accuracy(t1,t2,t3,m1,m2,m3) from root.test; ``` ### 8.2 数据画像 @@ -1243,79 +1133,79 @@ select Accuracy(t1,t2,t3,m1,m2,m3) from root.test 更多见文档[Data-Profiling](../SQL-Manual/UDF-Libraries.md#数据画像) ```sql -# ACF -select acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05 +# ACF; +select 
acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05; -# Distinct -select distinct(s2) from root.test.d2 +# Distinct; +select distinct(s2) from root.test.d2; -# Histogram -select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1 +# Histogram; +select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1; -# Integral -select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 -select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10 +# Integral; +select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; +select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10; -# IntegralAvg -select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 +# IntegralAvg; +select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; -# Mad -select mad(s0) from root.test -select mad(s0, "error"="0.01") from root.test +# Mad; +select mad(s0) from root.test; +select mad(s0, "error"="0.01") from root.test; -# Median -select median(s0, "error"="0.01") from root.test +# Median; +select median(s0, "error"="0.01") from root.test; -# MinMax -select minmax(s1) from root.test +# MinMax; +select minmax(s1) from root.test; -# Mode -select mode(s2) from root.test.d2 +# Mode; +select mode(s2) from root.test.d2; -# MvAvg -select mvavg(s1, "window"="3") from root.test +# MvAvg; +select mvavg(s1, "window"="3") from root.test; -# PACF -select pacf(s1, "lag"="5") from root.test +# PACF; +select pacf(s1, "lag"="5") from root.test; -# Percentile -select percentile(s0, "rank"="0.2", "error"="0.01") from root.test +# Percentile; +select percentile(s0, "rank"="0.2", "error"="0.01") from root.test; -# Quantile -select quantile(s0, "rank"="0.2", "K"="800") from root.test +# Quantile; +select quantile(s0, "rank"="0.2", "K"="800") from root.test; -# Period -select period(s1) from root.test.d3 +# Period; +select period(s1) from root.test.d3; -# QLB -select QLB(s1) from 
root.test.d1 +# QLB; +select QLB(s1) from root.test.d1; -# Resample -select resample(s1,'every'='5m','interp'='linear') from root.test.d1 -select resample(s1,'every'='30m','aggr'='first') from root.test.d1 -select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1 +# Resample; +select resample(s1,'every'='5m','interp'='linear') from root.test.d1; +select resample(s1,'every'='30m','aggr'='first') from root.test.d1; +select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1; -# Sample -select sample(s1,'method'='reservoir','k'='5') from root.test.d1 -select sample(s1,'method'='isometric','k'='5') from root.test.d1 +# Sample; +select sample(s1,'method'='reservoir','k'='5') from root.test.d1; +select sample(s1,'method'='isometric','k'='5') from root.test.d1; -# Segment -select segment(s1, "error"="0.1") from root.test +# Segment; +select segment(s1, "error"="0.1") from root.test; -# Skew -select skew(s1) from root.test.d1 +# Skew; +select skew(s1) from root.test.d1; -# Spline -select spline(s1, "points"="151") from root.test +# Spline; +select spline(s1, "points"="151") from root.test; -# Spread -select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 +# Spread; +select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; -# Stddev -select stddev(s1) from root.test.d1 +# Stddev; +select stddev(s1) from root.test.d1; -# ZScore -select zscore(s1) from root.test +# ZScore; +select zscore(s1) from root.test; ``` ### 8.3 异常检测 @@ -1323,34 +1213,34 @@ select zscore(s1) from root.test 更多见文档[Anomaly-Detection](../SQL-Manual/UDF-Libraries.md#异常检测) ```sql -# IQR -select iqr(s1) from root.test +# IQR; +select iqr(s1) from root.test; -# KSigma -select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +# KSigma; +select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30; -# LOF -select lof(s1,s2) from root.test.d1 where time<1000 -select lof(s1, "method"="series") 
from root.test.d1 where time<1000 +# LOF; +select lof(s1,s2) from root.test.d1 where time<1000; +select lof(s1, "method"="series") from root.test.d1 where time<1000; -# MissDetect -select missdetect(s2,'minlen'='10') from root.test.d2 +# MissDetect; +select missdetect(s2,'minlen'='10') from root.test.d2; -# Range -select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +# Range; +select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30; -# TwoSidedFilter -select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test +# TwoSidedFilter; +select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test; -# Outlier -select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test +# Outlier; +select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test; -# MasterTrain -select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test +# MasterTrain; +select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test; -# MasterDetect -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test +# MasterDetect; +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test; +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test; ``` ### 8.4 频域分析 @@ -1358,31 +1248,31 @@ select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3 更多见文档[Frequency-Domain](../SQL-Manual/UDF-Libraries.md#频域分析) ```sql -# Conv -select conv(s1,s2) from root.test.d2 +# Conv; +select conv(s1,s2) from root.test.d2; -# Deconv -select deconv(s3,s2) from root.test.d2 -select deconv(s3,s2,'result'='remainder') from root.test.d2 +# Deconv; +select deconv(s3,s2) from root.test.d2; 
+select deconv(s3,s2,'result'='remainder') from root.test.d2; -# DWT -select dwt(s1,"method"="haar") from root.test.d1 +# DWT; +select dwt(s1,"method"="haar") from root.test.d1; -# FFT -select fft(s1) from root.test.d1 -select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1 +# FFT; +select fft(s1) from root.test.d1; +select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1; -# HighPass -select highpass(s1,'wpass'='0.45') from root.test.d1 +# HighPass; +select highpass(s1,'wpass'='0.45') from root.test.d1; -# IFFT -select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1 +# IFFT; +select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1; -# LowPass -select lowpass(s1,'wpass'='0.45') from root.test.d1 +# LowPass; +select lowpass(s1,'wpass'='0.45') from root.test.d1; -# Envelope -select envelope(s1) from root.test.d1 +# Envelope; +select envelope(s1) from root.test.d1; ``` ### 8.5 数据匹配 @@ -1390,20 +1280,20 @@ select envelope(s1) from root.test.d1 更多见文档[Data-Matching](../SQL-Manual/UDF-Libraries.md#数据匹配) ```sql -# Cov -select cov(s1,s2) from root.test.d2 +# Cov; +select cov(s1,s2) from root.test.d2; -# DTW -select dtw(s1,s2) from root.test.d2 +# DTW; +select dtw(s1,s2) from root.test.d2; -# Pearson -select pearson(s1,s2) from root.test.d2 +# Pearson; +select pearson(s1,s2) from root.test.d2; -# PtnSym -select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1 +# PtnSym; +select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1; -# XCorr -select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 +# XCorr; +select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05; ``` ### 8.6 数据修复 @@ -1411,24 +1301,24 @@ select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 更多见文档[Data-Repairing](../SQL-Manual/UDF-Libraries.md#数据修复) ```sql -# 
TimestampRepair -select timestamprepair(s1,'interval'='10000') from root.test.d2 -select timestamprepair(s1) from root.test.d2 +# TimestampRepair; +select timestamprepair(s1,'interval'='10000') from root.test.d2; +select timestamprepair(s1) from root.test.d2; -# ValueFill -select valuefill(s1) from root.test.d2 -select valuefill(s1,"method"="previous") from root.test.d2 +# ValueFill; +select valuefill(s1) from root.test.d2; +select valuefill(s1,"method"="previous") from root.test.d2; -# ValueRepair -select valuerepair(s1) from root.test.d2 -select valuerepair(s1,'method'='LsGreedy') from root.test.d2 +# ValueRepair; +select valuerepair(s1) from root.test.d2; +select valuerepair(s1,'method'='LsGreedy') from root.test.d2; -# MasterRepair -select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test +# MasterRepair; +select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test; -# SeasonalRepair -select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2 -select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 +# SeasonalRepair; +select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2; +select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2; ``` ### 8.7 序列发现 @@ -1436,12 +1326,12 @@ select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 更多见文档[Series-Discovery](../SQL-Manual/UDF-Libraries.md#序列发现) ```sql -# ConsecutiveSequences -select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1 -select consecutivesequences(s1,s2) from root.test.d1 +# ConsecutiveSequences; +select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1; +select consecutivesequences(s1,s2) from root.test.d1; -# ConsecutiveWindows -select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 +# ConsecutiveWindows; +select consecutivewindows(s1,s2,'length'='10m') from root.test.d1; ``` ### 8.8 机器学习 @@ -1449,14 +1339,14 @@ select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 
更多见文档[Machine-Learning](../SQL-Manual/UDF-Libraries.md#机器学习) ```sql -# AR -select ar(s0,"p"="2") from root.test.d0 +# AR; +select ar(s0,"p"="2") from root.test.d0; -# Representation -select representation(s0,"tb"="3","vb"="2") from root.test.d0 +# Representation; +select representation(s0,"tb"="3","vb"="2") from root.test.d0; -# RM -select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0 +# RM; +select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0; ``` ## 9. 条件表达式 @@ -1469,24 +1359,24 @@ when 1000=1050 then "bad temperature" when P<=1000000 or P>=1100000 then "bad pressure" end as `result` -from root.test1 +from root.test1; select str, case when str like "%cc%" then "has cc" when str like "%dd%" then "has dd" else "no cc and dd" end as `result` -from root.test2 +from root.test2; select count(case when x<=1 then 1 end) as `(-∞,1]`, count(case when 1 SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 配置连续查询的时间窗口大小 ```sql CREATE CONTINUOUS QUERY cq2 - RESAMPLE RANGE 40s - BEGIN - SELECT max_value(temperature) - INTO root.ln.wf02.wt02(temperature_max), root.ln.wf02.wt01(temperature_max), root.ln.wf01.wt02(temperature_max), root.ln.wf01.wt01(temperature_max) - FROM root.ln.*.* - GROUP BY(10s) +END; -END - - -\> SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 同时配置连续查询执行的周期性间隔和时间窗口大小 ```sql CREATE CONTINUOUS QUERY cq3 - RESAMPLE EVERY 20s RANGE 40s - BEGIN - SELECT max_value(temperature) - INTO root.ln.wf02.wt02(temperature_max), root.ln.wf02.wt01(temperature_max), root.ln.wf01.wt02(temperature_max), root.ln.wf01.wt01(temperature_max) - FROM root.ln.*.* - GROUP BY(10s) - FILL(100.0) +END; -END - - - -\> SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 配置连续查询每次查询执行时间窗口的结束时间 ```sql CREATE CONTINUOUS QUERY cq4 - RESAMPLE EVERY 20s RANGE 40s, 20s - BEGIN - SELECT max_value(temperature) - INTO root.ln.wf02.wt02(temperature_max), 
root.ln.wf02.wt01(temperature_max), root.ln.wf01.wt02(temperature_max), root.ln.wf01.wt01(temperature_max) - FROM root.ln.*.* - GROUP BY(10s) - FILL(100.0) +END; -END - - - -\> SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 没有GROUP BY TIME子句的连续查询 ```sql CREATE CONTINUOUS QUERY cq5 - RESAMPLE EVERY 20s - BEGIN - SELECT temperature + 1 - INTO root.precalculated_sg.::(temperature) - FROM root.ln.*.* - align by device +END; -END - - - -\> SELECT temperature from root.precalculated_sg.*.* align by device; +SELECT temperature from root.precalculated_sg.*.* align by device; ``` ### 11.2 连续查询的管理 @@ -1761,18 +1567,12 @@ DROP CONTINUOUS QUERY s1_count_cq; 1. 创建一个连续查询 ```sql CREATE CQ s1_count_cq - BEGIN - -​ SELECT count(s1) - -​ INTO root.sg_count.d.count_s1 - -​ FROM root.sg.d - -​ GROUP BY(30m) - -END + SELECT count(s1) + INTO root.sg_count.d.count_s1 + FROM root.sg.d + GROUP BY(30m) +END; ``` 1. 查询连续查询的结果 ```sql @@ -1792,11 +1592,11 @@ CREATE FUNCTION AS (USING URI URI-STRING)? 
#### 不指定URI ```sql -CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' +CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample'; ``` #### 指定URI ```sql -CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' USING URI 'http://jar/example.jar' +CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' USING URI 'http://jar/example.jar'; ``` ### 12.3 UDF 卸载 @@ -1804,7 +1604,7 @@ CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' USING URI 'http:// DROP FUNCTION ``` ```sql -DROP FUNCTION example +DROP FUNCTION example; ``` ### 12.4 UDF 查询 @@ -1818,16 +1618,13 @@ SELECT example(s1, s2, 'key1'='value1', 'key2'='value2') FROM root.sg.d1; #### 与其他查询的嵌套查询 ```sql SELECT s1, s2, example(s1, s2) FROM root.sg.d1; - SELECT *, example(*) FROM root.sg.d1 DISABLE ALIGN; - SELECT s1 * example(* / s1 + s2) FROM root.sg.d1; - SELECT s1, s2, s1 + example(s1, s2), s1 - example(s1 + example(s1, s2) / s2) FROM root.sg.d1; ``` ### 12.5 查看所有注册的 UDF ```sql -SHOW FUNCTIONS +SHOW FUNCTIONS; ``` ## 13. 
权限管理 @@ -1835,68 +1632,63 @@ SHOW FUNCTIONS - 创建用户(需 MANAGE_USER 权限) - ```SQL -CREATE USER -eg: CREATE USER user1 'passwd' +CREATE USER ; +eg: CREATE USER user1 'passwd'; ``` - 删除用户 (需 MANEGE_USER 权限) - ```SQL -DROP USER -eg: DROP USER user1 +DROP USER ; +eg: DROP USER user1; ``` - 创建角色 (需 MANAGE_ROLE 权限) ```SQL -CREATE ROLE -eg: CREATE ROLE role1 +CREATE ROLE ; +eg: CREATE ROLE role1; ``` - 删除角色 (需 MANAGE_ROLE 权限) - ```SQL -DROP ROLE -eg: DROP ROLE role1 +DROP ROLE ; +eg: DROP ROLE role1; ``` - 赋予用户角色 (需 MANAGE_ROLE 权限) - ```SQL -GRANT ROLE TO -eg: GRANT ROLE admin TO user1 +GRANT ROLE TO ; +eg: GRANT ROLE admin TO user1; ``` - 移除用户角色 (需 MANAGE_ROLE 权限) - ```SQL -REVOKE ROLE FROM -eg: REVOKE ROLE admin FROM user1 +REVOKE ROLE FROM ; +eg: REVOKE ROLE admin FROM user1; ``` - 列出所有用户 (需 MANEGE_USER 权限) ```SQL -LIST USER +LIST USER; ``` - 列出所有角色 (需 MANAGE_ROLE 权限) ```SQL -LIST ROLE +LIST ROLE; ``` - 列出指定角色下所有用户 (需 MANEGE_USER 权限) ```SQL -LIST USER OF ROLE -eg: LIST USER OF ROLE roleuser +LIST USER OF ROLE ; +eg: LIST USER OF ROLE roleuser; ``` - 列出指定用户下所有角色 @@ -1904,8 +1696,8 @@ eg: LIST USER OF ROLE roleuser 用户可以列出自己的角色,但列出其他用户的角色需要拥有 MANAGE_ROLE 权限。 ```SQL -LIST ROLE OF USER -eg: LIST ROLE OF USER tempuser +LIST ROLE OF USER ; +eg: LIST ROLE OF USER tempuser; ``` - 列出用户所有权限 @@ -1915,7 +1707,6 @@ eg: LIST ROLE OF USER tempuser ```SQL LIST PRIVILEGES OF USER ; eg: LIST PRIVILEGES OF USER tempuser; - ``` - 列出角色所有权限 diff --git a/src/zh/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md b/src/zh/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md index 2e42cd67f..44e4a6f00 100644 --- a/src/zh/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md +++ b/src/zh/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md @@ -7,32 +7,32 @@ #### 创建数据库 ```sql -CREATE DATABASE root.ln +CREATE DATABASE root.ln; ``` #### 查看数据库 ```sql -show databases -show databases root.* -show databases root.** +show databases; +show databases root.*; +show databases root.**; ``` #### 删除数据库 
```sql -DELETE DATABASE root.ln -DELETE DATABASE root.sgcc -DELETE DATABASE root.** +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +DELETE DATABASE root.**; ``` #### 统计数据库数量 ```sql -count databases -count databases root.* -count databases root.sgcc.* -count databases root.sgcc +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` ### 1.2 时间序列管理 @@ -40,119 +40,119 @@ count databases root.sgcc #### 创建时间序列 ```sql -create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` - 简化版 ```sql -create timeseries root.ln.wf01.wt01.status BOOLEAN -create timeseries root.ln.wf01.wt01.temperature FLOAT -create timeseries root.ln.wf02.wt02.hardware TEXT -create timeseries root.ln.wf02.wt02.status BOOLEAN -create timeseries root.sgcc.wf03.wt01.status BOOLEAN -create timeseries root.sgcc.wf03.wt01.temperature FLOAT +create timeseries root.ln.wf01.wt01.status BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature FLOAT; +create timeseries root.ln.wf02.wt02.hardware TEXT; +create timeseries root.ln.wf02.wt02.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature FLOAT; ``` - 错误提示 ```sql -create timeseries 
root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN -> error: encoding TS_2DIFF does not support BOOLEAN +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN; +> error: encoding TS_2DIFF does not support BOOLEAN; ``` #### 创建对齐时间序列 ```sql -CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ``` #### 删除时间序列 ```sql -delete timeseries root.ln.wf01.wt01.status -delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -delete timeseries root.ln.wf02.* -drop timeseries root.ln.wf02.* +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` #### 查看时间序列 ```sql -SHOW TIMESERIES -SHOW TIMESERIES -SHOW TIMESERIES root.** -SHOW TIMESERIES root.ln.** -SHOW TIMESERIES root.ln.** limit 10 offset 10 -SHOW TIMESERIES root.ln.** where timeseries contains 'wf01.wt' -SHOW TIMESERIES root.ln.** where dataType=FLOAT +SHOW TIMESERIES; +SHOW TIMESERIES ; +SHOW TIMESERIES root.**; +SHOW TIMESERIES root.ln.**; +SHOW TIMESERIES root.ln.** limit 10 offset 10; +SHOW TIMESERIES root.ln.** where timeseries contains 'wf01.wt'; +SHOW TIMESERIES root.ln.** where dataType=FLOAT; SHOW TIMESERIES root.ln.** where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -SHOW LATEST TIMESERIES +SHOW LATEST TIMESERIES; ``` #### 统计时间序列数量 ```sql -COUNT TIMESERIES root.** -COUNT TIMESERIES root.ln.** -COUNT TIMESERIES root.ln.*.*.status -COUNT TIMESERIES root.ln.wf01.wt01.status -COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES 
root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc'; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c'; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c'; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; COUNT TIMESERIES root.** WHERE time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -COUNT TIMESERIES root.** GROUP BY LEVEL=1 -COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` #### 标签点管理 ```sql -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` - 重命名标签或属性 ```sql -ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1; ``` - 重新设置标签或属性的值 ```sql -ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1; ``` - 删除已经存在的标签或属性 ```sql -ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2; ``` - 添加新的标签 ```sql -ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4; ``` - 添加新的属性 ```sql -ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4; ``` - 更新插入别名,标签和属性 ```sql -ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4) +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, 
attr4=v4); ``` - 使用标签作为过滤条件查询时间序列 @@ -164,37 +164,37 @@ SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause 返回给定路径的下的所有满足条件的时间序列信息: ```sql -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` - 使用标签作为过滤条件统计时间序列数量 ```sql -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL=; ``` 返回给定路径的下的所有满足条件的时间序列的数量: ```sql -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` 创建对齐时间序列: ```sql -create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) +create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)); ``` 支持查询: ```sql -show timeseries where TAGS(tag1)='v1' +show timeseries where TAGS(tag1)='v1'; ``` ### 1.3 时间序列路径管理 @@ -202,121 +202,93 @@ show timeseries where TAGS(tag1)='v1' #### 查看路径的所有子路径 ```sql -SHOW CHILD PATHS pathPattern -- 查询 root.ln 的下一层:show child paths root.ln -- 查询形如 root.xx.xx.xx 的路径:show child paths root.*.* +SHOW CHILD PATHS pathPattern; +- 查询 root.ln 的下一层; +show child 
paths root.ln; +- 查询形如 root.xx.xx.xx 的路径; +show child paths root.*.*; ``` #### 查看路径的所有子节点 ```sql -SHOW CHILD NODES pathPattern - -- 查询 root 的下一层:show child nodes root -- 查询 root.ln 的下一层 :show child nodes root.ln +SHOW CHILD NODES pathPattern; +- 查询 root 的下一层; +show child nodes root; +- 查询 root.ln 的下一层; +show child nodes root.ln; ``` #### 查看设备 ```sql -IoTDB> show devices - -IoTDB> show devices root.ln.** - -IoTDB> show devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; +show devices; +show devices root.ln.**; +show devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; ``` ##### 查看设备及其 database 信息 ```sql -IoTDB> show devices with database - -IoTDB> show devices root.ln.** with database +show devices with database; +show devices root.ln.** with database; ``` #### 统计节点数 ```sql -IoTDB > COUNT NODES root.** LEVEL=2 - -IoTDB > COUNT NODES root.ln.** LEVEL=2 - -IoTDB > COUNT NODES root.ln.wf01.* LEVEL=3 - -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.* LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` #### 统计设备数量 ```sql - -IoTDB> count devices - -IoTDB> count devices root.ln.** - -IoTDB> count devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; +count devices; +count devices root.ln.**; +count devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; ``` ### 1.4 数据存活时间管理 #### 设置 TTL ```sql -IoTDB> set ttl to root.ln 3600000 -``` -```sql -IoTDB> set ttl to root.sgcc.** 3600000 -``` -```sql -IoTDB> set ttl to root.** 3600000 +set ttl to root.ln 3600000; +set ttl to root.sgcc.** 3600000; +set ttl to root.** 3600000; ``` #### 取消 TTL ```sql -IoTDB> unset ttl from root.ln -``` -```sql -IoTDB> unset ttl from root.sgcc.** -``` -```sql -IoTDB> unset ttl from root.** +unset ttl from root.ln; +unset ttl from root.sgcc.**; +unset ttl from root.**; ``` #### 显示 TTL ```sql -IoTDB> SHOW ALL TTL -``` -```sql -IoTDB> SHOW TTL 
ON pathPattern -``` -```sql -IoTDB> show DEVICES +SHOW ALL TTL; +SHOW TTL ON pathPattern; +show DEVICES; ``` ## 2. 写入数据 ### 2.1 写入单列数据 ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp,status) values(1,true) -``` -```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1'),(2, 'v1') +insert into root.ln.wf02.wt02(timestamp,status) values(1,true); +insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1'),(2, 'v1'); ``` ### 2.2 写入多列数据 ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) values (2, false, 'v2') -``` -```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4') +insert into root.ln.wf02.wt02(timestamp, status, hardware) values (2, false, 'v2'); +insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4'); ``` ### 2.3 使用服务器时间戳 ```sql -IoTDB > insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2') +insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2'); ``` ### 2.4 写入对齐时间序列数据 ```sql -IoTDB > create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE) -``` -```sql -IoTDB > insert into root.sg1.d1(timestamp, s1, s2) aligned values(1, 1, 1) -``` -```sql -IoTDB > insert into root.sg1.d1(timestamp, s1, s2) aligned values(2, 2, 2), (3, 3, 3) -``` -```sql -IoTDB > select * from root.sg1.d1 +create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE); +insert into root.sg1.d1(timestamp, s1, s2) aligned values(1, 1, 1); +insert into root.sg1.d1(timestamp, s1, s2) aligned values(2, 2, 2), (3, 3, 3); +select * from root.sg1.d1; ``` ### 2.5 加载 TsFile 文件数据 @@ -342,43 +314,24 @@ load '' [sglevel=int][onSuccess=delete/none] ### 3.1 删除单列数据 ```sql delete from root.ln.wf02.wt02.status where time<=2017-11-01T16:26:00; -``` -```sql delete from root.ln.wf02.wt02.status where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -``` -```sql -delete from root.ln.wf02.wt02.status where time < 10 
-``` -```sql -delete from root.ln.wf02.wt02.status where time <= 10 -``` -```sql -delete from root.ln.wf02.wt02.status where time < 20 and time > 10 -``` -```sql -delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10 -``` -```sql -delete from root.ln.wf02.wt02.status where time > 20 -``` -```sql -delete from root.ln.wf02.wt02.status where time >= 20 -``` -```sql -delete from root.ln.wf02.wt02.status where time = 20 +delete from root.ln.wf02.wt02.status where time < 10; +delete from root.ln.wf02.wt02.status where time <= 10; +delete from root.ln.wf02.wt02.status where time < 20 and time > 10; +delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10; +delete from root.ln.wf02.wt02.status where time > 20; +delete from root.ln.wf02.wt02.status where time >= 20; +delete from root.ln.wf02.wt02.status where time = 20; ``` 出错: ```sql -delete from root.ln.wf02.wt02.status where time > 4 or time < 0 - -Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic - -expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' +delete from root.ln.wf02.wt02.status where time > 4 or time < 0; +Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' ``` 删除时间序列中的所有数据: ```sql -delete from root.ln.wf02.wt02.status +delete from root.ln.wf02.wt02.status; ``` ### 3.2 删除多列数据 ```sql @@ -386,8 +339,7 @@ delete from root.ln.wf02.wt02.* where time <= 2017-11-01T16:26:00; ``` 声明式的编程方式: ```sql -IoTDB> delete from root.ln.wf03.wt02.status where time < now() - +delete from root.ln.wf03.wt02.status where time < now(); Msg: The statement is executed successfully. ``` ## 4. 数据查询 @@ -396,7 +348,7 @@ Msg: The statement is executed successfully. 
#### 时间过滤查询 ```sql -select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 +select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000; ``` #### 根据一个时间区间选择多列数据 ```sql @@ -427,9 +379,7 @@ select s1 as temperature, s2 as speed from root.ln.wf01.wt01; 不支持: ```sql select s1, count(s1) from root.sg.d1; - select sin(s1), count(s1) from root.sg.d1; - select s1, count(s1) from root.sg.d1 group by ([10,100),10ms); ``` ##### 时间序列查询嵌套表达式 @@ -437,67 +387,49 @@ select s1, count(s1) from root.sg.d1 group by ([10,100),10ms); 示例 1: ```sql select a, - -​ b, - -​ ((a + 1) * 2 - 1) % 2 + 1.5, - -​ sin(a + sin(a + sin(b))), - -​ -(a + b) * (sin(a + b) * sin(a + b) + cos(a + b) * cos(a + b)) + 1 - + b, + ((a + 1) * 2 - 1) % 2 + 1.5, + sin(a + sin(a + sin(b))), + -(a + b) * (sin(a + b) * sin(a + b) + cos(a + b) * cos(a + b)) + 1 from root.sg1; ``` 示例 2: ```sql -select (a + b) * 2 + sin(a) from root.sg - +select (a + b) * 2 + sin(a) from root.sg; +``` 示例 3: - -select (a + *) / 2 from root.sg1 - +```sql +select (a + *) / 2 from root.sg1; +``` 示例 4: - -select (a + b) * 3 from root.sg, root.ln +```sql +select (a + b) * 3 from root.sg, root.ln; ``` ##### 聚合查询嵌套表达式 示例 1: ```sql select avg(temperature), - -​ sin(avg(temperature)), - -​ avg(temperature) + 1, - -​ -sum(hardware), - -​ avg(temperature) + sum(hardware) - + sin(avg(temperature)), + avg(temperature) + 1, + -sum(hardware), + avg(temperature) + sum(hardware) from root.ln.wf01.wt01; ``` 示例 2: ```sql select avg(*), - -​ (avg(*) + 1) * 3 / 2 -1 - -from root.sg1 + (avg(*) + 1) * 3 / 2 -1 +from root.sg1; ``` 示例 3: ```sql select avg(temperature), - -​ sin(avg(temperature)), - -​ avg(temperature) + 1, - -​ -sum(hardware), - -​ avg(temperature) + sum(hardware) as custom_sum - + sin(avg(temperature)), + avg(temperature) + 1, + -sum(hardware), + avg(temperature) + sum(hardware) as custom_sum from root.ln.wf01.wt01 - GROUP BY([10, 90), 10ms); ``` #### 最新点查询 @@ -510,15 +442,15 @@ select last [COMMA ]* 
from < PrefixPath > [COMMA < PrefixPath >]* < 查询 root.ln.wf01.wt01.status 的最新数据点 ```sql -IoTDB> select last status from root.ln.wf01.wt01 +select last status from root.ln.wf01.wt01; ``` 查询 root.ln.wf01.wt01 下 status,temperature 时间戳大于等于 2017-11-07T23:50:00 的最新数据点 ```sql -IoTDB> select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; ``` 查询 root.ln.wf01.wt01 下所有序列的最新数据点,并按照序列名降序排列 ```sql -IoTDB> select last * from root.ln.wf01.wt01 order by timeseries desc; +select last * from root.ln.wf01.wt01 order by timeseries desc; ``` ### 4.3 查询过滤条件 @@ -573,20 +505,20 @@ select code from root.sg1.d1 where temperature is not null; 查询 `root.sg.d1` 下 `value` 含有`'cc'`的数据 ```sql -IoTDB> select * from root.sg.d1 where value like '%cc%' +select * from root.sg.d1 where value like '%cc%'; ``` 查询 `root.sg.d1` 下 `value` 中间为 `'b'`、前后为任意单个字符的数据 ```sql -IoTDB> select * from root.sg.device where value like '_b_' +select * from root.sg.device where value like '_b_'; ``` 查询 root.sg.d1 下 value 值为26个英文字符组成的字符串 ```sql -IoTDB> select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; ``` 查询 root.sg.d1 下 value 值为26个小写英文字符组成的字符串且时间大于100的 ```sql -IoTDB> select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; ``` ### 4.4 分段分组聚合 @@ -629,23 +561,23 @@ select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017 统计不同 database 下 status 序列的数据点个数 ```sql -select count(status) from root.** group by level = 1 +select count(status) from root.** group by level = 1; ``` 统计不同设备下 status 序列的数据点个数 ```sql -select count(status) from root.** group by level = 3 +select count(status) from root.** group by level = 3; ``` 统计不同 database 下的不同设备中 status 序列的数据点个数 ```sql -select count(status) from root.** group by level = 1, 3 +select 
count(status) from root.** group by level = 1, 3; ``` 查询所有序列下温度传感器 temperature 的最大值 ```sql -select max_value(temperature) from root.** group by level = 0 +select max_value(temperature) from root.** group by level = 0; ``` 查询某一层级下所有传感器拥有的总数据点数 ```sql -select count(*) from root.ln.** group by level = 2 +select count(*) from root.ln.** group by level = 2; ``` #### 标签分组聚合 @@ -667,15 +599,15 @@ group by variation(controlExpression[,delta][,ignoreNull=true/false]) ``` ##### delta=0时的等值事件分段 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6); ``` 指定ignoreNull为false ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false); ``` ##### delta!=0时的差值事件分段 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4); ``` #### 条件分段聚合 ```sql @@ -683,11 +615,11 @@ group by condition(predict,[keep>/>=/=/<=/<]threshold,[,ignoreNull=true/false]) ``` 查询至少连续两行以上的charging_status=1的数据 ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true); ``` 当设置`ignoreNull`为false时,遇到null值为将其视为一个不满足条件的行,得到结果原先的分组被含null的行拆分 ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false); ``` #### 会话分段聚合 ```sql @@ -695,38 +627,35 @@ 
group by session(timeInterval) ``` 按照不同的时间单位设定时间间隔 ```sql -select __endTime,count(*) from root.** group by session(1d) +select __endTime,count(*) from root.** group by session(1d); ``` 和`HAVING`、`ALIGN BY DEVICE`共同使用 ```sql -select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device +select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device; ``` #### 点数分段聚合 ```sql group by count(controlExpression, size[,ignoreNull=true/false]) ``` -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5) - +```sql +select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5); +``` 当使用ignoreNull将null值也考虑进来 ```sql -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false) +select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false); ``` ### 4.5 聚合结果过滤 不正确的: ```sql -select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 - -select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 - -select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 - -select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 +select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; +select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; +select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; +select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; ``` SQL 示例: ```sql select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 2; - select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device; ``` ### 4.6 结果集补空值 @@ -759,37 +688,37 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: 基本的 
`LIMIT` 子句 ```sql -select status, temperature from root.ln.wf01.wt01 limit 10 +select status, temperature from root.ln.wf01.wt01 limit 10; ``` 带 `OFFSET` 的 `LIMIT` 子句 ```sql -select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 +select status, temperature from root.ln.wf01.wt01 limit 5 offset 3; ``` `LIMIT` 子句与 `WHERE` 子句结合 ```sql -select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 5 offset 3 +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 5 offset 3; ``` `LIMIT` 子句与 `GROUP BY` 子句组合 ```sql -select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3 +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3; ``` #### 按列分页 基本的 `SLIMIT` 子句 ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1; ``` 带 `SOFFSET` 的 `SLIMIT` 子句 ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1; ``` `SLIMIT` 子句与 `GROUP BY` 子句结合 ```sql -select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1 +select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1; ``` `SLIMIT` 子句与 `LIMIT` 子句结合 ```sql -select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 +select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0; ``` ### 4.8 排序 @@ -811,7 +740,7 @@ 
select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` 对聚合后的结果进行排序 ```sql -select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` ### 4.9 查询对齐模式 @@ -824,50 +753,36 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; #### 整体描述 ```sql selectIntoStatement - -​ : SELECT - -​ resultColumn [, resultColumn] ... - -​ INTO intoItem [, intoItem] ... - -​ FROM prefixPath [, prefixPath] ... - -​ [WHERE whereCondition] - -​ [GROUP BY groupByTimeClause, groupByLevelClause] - -​ [FILL ({PREVIOUS | LINEAR | constant} (, interval=DURATION_LITERAL)?)] - -​ [LIMIT rowLimit OFFSET rowOffset] - -​ [ALIGN BY DEVICE] - -​ ; - - + : SELECT + resultColumn [, resultColumn] ... + INTO intoItem [, intoItem] ... + FROM prefixPath [, prefixPath] ... 
+ [WHERE whereCondition] + [GROUP BY groupByTimeClause, groupByLevelClause] + [FILL ({PREVIOUS | LINEAR | constant} (, interval=DURATION_LITERAL)?)] + [LIMIT rowLimit OFFSET rowOffset] + [ALIGN BY DEVICE] + ; intoItem - -​ : [ALIGNED] intoDevicePath '(' intoMeasurementName [',' intoMeasurementName]* ')' - -​ ; + : [ALIGNED] intoDevicePath '(' intoMeasurementName [',' intoMeasurementName]* ')' + ; ``` 按时间对齐,将 `root.sg` database 下四条序列的查询结果写入到 `root.sg_copy` database 下指定的四条序列中 ```sql -IoTDB> select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; ``` 按时间对齐,将聚合查询的结果存储到指定序列中 ```sql -IoTDB> select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); ``` 按设备对齐 ```sql -IoTDB> select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; ``` 按设备对齐,将表达式计算的结果存储到指定序列中 ```sql -IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; ``` #### 使用变量占位符 @@ -875,21 +790,15 @@ IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) fr ###### 目标设备不使用变量占位符 & 目标物理量列表使用变量占位符 ```sql - select s1, s2 - into root.sg_copy.d1(::), root.sg_copy.d2(s1), root.sg_copy.d1(${3}), root.sg_copy.d2(::) - from root.sg.d1, root.sg.d2; ``` 该语句等价于: ```sql - select s1, s2 - into root.sg_copy.d1(s1), root.sg_copy.d2(s1), root.sg_copy.d1(s2), root.sg_copy.d2(s2) - 
from root.sg.d1, root.sg.d2; ``` @@ -897,9 +806,7 @@ from root.sg.d1, root.sg.d2; ```sql select d1.s1, d1.s2, d2.s3, d3.s4 - into ::(s1_1, s2_2), root.sg.d2_2(s3_3), root.${2}_copy.::(s4) - from root.sg; ``` @@ -913,47 +820,37 @@ select * into root.sg_bk.::(::) from root.sg.**; ###### 目标设备不使用变量占位符 & 目标物理量列表使用变量占位符 ```sql - select s1, s2, s3, s4 - into root.backup_sg.d1(s1, s2, s3, s4), root.backup_sg.d2(::), root.sg.d3(backup_${4}) - from root.sg.d1, root.sg.d2, root.sg.d3 - align by device; ``` ###### 目标设备使用变量占位符 & 目标物理量列表不使用变量占位符 ```sql - select avg(s1), sum(s2) + sum(s3), count(s4) - into root.agg_${2}.::(avg_s1, sum_s2_add_s3, count_s4) - from root.** - align by device; ``` ###### 目标设备使用变量占位符 & 目标物理量列表使用变量占位符 ```sql - select * into ::(backup_${4}) from root.sg.** align by device; ``` #### 指定目标序列为对齐序列 ```sql - select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; ``` ## 5. 运维语句 生成对应的查询计划 ```sql -explain select s1,s2 from root.sg.d1 +explain select s1,s2 from root.sg.d1; ``` 执行对应的查询语句,并获取分析结果 ```sql -explain analyze select s1,s2 from root.sg.d1 order by s1 +explain analyze select s1,s2 from root.sg.d1 order by s1; ``` ## 6. 运算符 @@ -964,7 +861,7 @@ explain analyze select s1,s2 from root.sg.d1 order by s1 更多见文档 [Arithmetic Operators and Functions](./Operator-and-Expression.md#_1-1-算数运算符) ```sql -select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1 +select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1; ``` ### 6.2 比较运算符 @@ -972,27 +869,27 @@ select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root 更多见文档[Comparison Operators and Functions](./Operator-and-Expression.md#_1-2-比较运算符) ```sql -# Basic comparison operators +# Basic comparison operators; select a, b, a > 10, a <= b, !(a <= b), a > 10 && a > b from root.test; -# `BETWEEN ... AND ...` operator +# `BETWEEN ... 
AND ...` operator; select temperature from root.sg1.d1 where temperature between 36.5 and 40; select temperature from root.sg1.d1 where temperature not between 36.5 and 40; -# Fuzzy matching operator: Use `Like` for fuzzy matching -select * from root.sg.d1 where value like '%cc%' -select * from root.sg.device where value like '_b_' +# Fuzzy matching operator: Use `Like` for fuzzy matching; +select * from root.sg.d1 where value like '%cc%'; +select * from root.sg.device where value like '_b_'; -# Fuzzy matching operator: Use `Regexp` for fuzzy matching -select * from root.sg.d1 where value regexp '^[A-Za-z]+$' -select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +# Fuzzy matching operator: Use `Regexp` for fuzzy matching; +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; select b, b like '1%', b regexp '[0-2]' from root.test; -# `IS NULL` operator +# `IS NULL` operator; select code from root.sg1.d1 where temperature is null; select code from root.sg1.d1 where temperature is not null; -# `IN` operator +# `IN` operator; select code from root.sg1.d1 where code in ('200', '300', '400', '500'); select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); select a, a in (1, 2) from root.test; @@ -1048,25 +945,25 @@ select ts, in_range(ts, 'lower'='2', 'upper'='3.1') from root.test; ```sql select s1, string_contains(s1, 's'='warn') from root.sg1.d4; select s1, string_matches(s1, 'regex'='[^\\s]+37229') from root.sg1.d4; -select s1, length(s1) from root.sg1.d1 -select s1, locate(s1, "target"="1") from root.sg1.d1 -select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1 -select s1, startswith(s1, "target"="1") from root.sg1.d1 -select s1, endswith(s1, "target"="1") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", 
"series_behind"="true") from root.sg1.d1 -select s1, substring(s1 from 1 for 2) from root.sg1.d1 -select s1, replace(s1, 'es', 'tt') from root.sg1.d1 -select s1, upper(s1) from root.sg1.d1 -select s1, lower(s1) from root.sg1.d1 -select s3, trim(s3) from root.sg1.d1 -select s1, s2, strcmp(s1, s2) from root.sg1.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1 -select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1 -select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 +select s1, length(s1) from root.sg1.d1; +select s1, locate(s1, "target"="1") from root.sg1.d1; +select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1; +select s1, startswith(s1, "target"="1") from root.sg1.d1; +select s1, endswith(s1, "target"="1") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", "series_behind"="true") from root.sg1.d1; +select s1, substring(s1 from 1 for 2) from root.sg1.d1; +select s1, replace(s1, 'es', 'tt') from root.sg1.d1; +select s1, upper(s1) from root.sg1.d1; +select s1, lower(s1) from root.sg1.d1; +select s3, trim(s3) from root.sg1.d1; +select s1, s2, strcmp(s1, s2) from root.sg1.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1; +select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1; +select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1; +select 
regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1; +select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1; ``` ### 7.5 数据类型转换函数 @@ -1074,7 +971,7 @@ select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 更多见文档[Data Type Conversion Function](./Operator-and-Expression.md#_2-5-数据类型转换函数) ```sql -SELECT cast(s1 as INT32) from root.sg +SELECT cast(s1 as INT32) from root.sg; ``` ### 7.6 常序列生成函数 @@ -1122,8 +1019,8 @@ select equal_size_bucket_agg_sample(temperature, 'type'='avg','proportion'='0.1' select equal_size_bucket_m4_sample(temperature, 'proportion'='0.1') as M4_sample from root.ln.wf01.wt01; select equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='avg', 'number'='2') as outlier_avg_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='stendis', 'number'='2') as outlier_stendis_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='cos', 'number'='2') as outlier_cos_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='prenextdis', 'number'='2') as outlier_prenextdis_sample from root.ln.wf01.wt01; -select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1 -select M4(s1,'windowSize'='10') from root.vehicle.d1 +select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1; +select M4(s1,'windowSize'='10') from root.vehicle.d1; ``` ### 7.12 时间序列处理函数 @@ -1131,7 +1028,7 @@ select M4(s1,'windowSize'='10') from root.vehicle.d1 更多见文档[Time-Series](./Operator-and-Expression.md#_2-11-时间序列处理函数) ```sql -select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1 +select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1; ``` ## 8. 
数据质量函数库 @@ -1143,24 +1040,24 @@ select change_points(s1), change_points(s2), change_points(s3), change_points(s4 更多见文档[Data-Quality](../SQL-Manual/UDF-Libraries.md#数据质量) ```sql -# Completeness -select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Completeness; +select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Consistency -select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Consistency; +select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Timeliness -select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Timeliness; +select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Validity -select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Validity; +select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Accuracy -select Accuracy(t1,t2,t3,m1,m2,m3) from root.test +# Accuracy; +select Accuracy(t1,t2,t3,m1,m2,m3) from root.test; ``` ### 8.2 数据画像 @@ -1168,79 +1065,79 @@ select Accuracy(t1,t2,t3,m1,m2,m3) from root.test 更多见文档[Data-Profiling](../SQL-Manual/UDF-Libraries.md#数据画像) ```sql -# ACF -select acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05 +# ACF; +select 
acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05; -# Distinct -select distinct(s2) from root.test.d2 +# Distinct; +select distinct(s2) from root.test.d2; -# Histogram -select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1 +# Histogram; +select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1; -# Integral -select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 -select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10 +# Integral; +select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; +select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10; -# IntegralAvg -select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 +# IntegralAvg; +select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; -# Mad -select mad(s0) from root.test -select mad(s0, "error"="0.01") from root.test +# Mad; +select mad(s0) from root.test; +select mad(s0, "error"="0.01") from root.test; -# Median -select median(s0, "error"="0.01") from root.test +# Median; +select median(s0, "error"="0.01") from root.test; -# MinMax -select minmax(s1) from root.test +# MinMax; +select minmax(s1) from root.test; -# Mode -select mode(s2) from root.test.d2 +# Mode; +select mode(s2) from root.test.d2; -# MvAvg -select mvavg(s1, "window"="3") from root.test +# MvAvg; +select mvavg(s1, "window"="3") from root.test; -# PACF -select pacf(s1, "lag"="5") from root.test +# PACF; +select pacf(s1, "lag"="5") from root.test; -# Percentile -select percentile(s0, "rank"="0.2", "error"="0.01") from root.test +# Percentile; +select percentile(s0, "rank"="0.2", "error"="0.01") from root.test; -# Quantile -select quantile(s0, "rank"="0.2", "K"="800") from root.test +# Quantile; +select quantile(s0, "rank"="0.2", "K"="800") from root.test; -# Period -select period(s1) from root.test.d3 +# Period; +select period(s1) from root.test.d3; -# QLB -select QLB(s1) from 
root.test.d1 +# QLB; +select QLB(s1) from root.test.d1; -# Resample -select resample(s1,'every'='5m','interp'='linear') from root.test.d1 -select resample(s1,'every'='30m','aggr'='first') from root.test.d1 -select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1 +# Resample; +select resample(s1,'every'='5m','interp'='linear') from root.test.d1; +select resample(s1,'every'='30m','aggr'='first') from root.test.d1; +select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1; -# Sample -select sample(s1,'method'='reservoir','k'='5') from root.test.d1 -select sample(s1,'method'='isometric','k'='5') from root.test.d1 +# Sample; +select sample(s1,'method'='reservoir','k'='5') from root.test.d1; +select sample(s1,'method'='isometric','k'='5') from root.test.d1; -# Segment -select segment(s1, "error"="0.1") from root.test +# Segment; +select segment(s1, "error"="0.1") from root.test; -# Skew -select skew(s1) from root.test.d1 +# Skew; +select skew(s1) from root.test.d1; -# Spline -select spline(s1, "points"="151") from root.test +# Spline; +select spline(s1, "points"="151") from root.test; -# Spread -select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 +# Spread; +select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; -# Stddev -select stddev(s1) from root.test.d1 +# Stddev; +select stddev(s1) from root.test.d1; -# ZScore -select zscore(s1) from root.test +# ZScore; +select zscore(s1) from root.test; ``` ### 8.3 异常检测 @@ -1248,34 +1145,34 @@ select zscore(s1) from root.test 更多见文档[Anomaly-Detection](../SQL-Manual/UDF-Libraries.md#异常检测) ```sql -# IQR -select iqr(s1) from root.test +# IQR; +select iqr(s1) from root.test; -# KSigma -select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +# KSigma; +select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30; -# LOF -select lof(s1,s2) from root.test.d1 where time<1000 -select lof(s1, "method"="series") 
from root.test.d1 where time<1000 +# LOF; +select lof(s1,s2) from root.test.d1 where time<1000; +select lof(s1, "method"="series") from root.test.d1 where time<1000; -# MissDetect -select missdetect(s2,'minlen'='10') from root.test.d2 +# MissDetect; +select missdetect(s2,'minlen'='10') from root.test.d2; -# Range -select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +# Range; +select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30; -# TwoSidedFilter -select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test +# TwoSidedFilter; +select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test; -# Outlier -select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test +# Outlier; +select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test; -# MasterTrain -select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test +# MasterTrain; +select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test; -# MasterDetect -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test +# MasterDetect; +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test; +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test; ``` ### 8.4 频域分析 @@ -1283,31 +1180,31 @@ select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3 更多见文档[Frequency-Domain](../SQL-Manual/UDF-Libraries.md#频域分析) ```sql -# Conv -select conv(s1,s2) from root.test.d2 +# Conv; +select conv(s1,s2) from root.test.d2; -# Deconv -select deconv(s3,s2) from root.test.d2 -select deconv(s3,s2,'result'='remainder') from root.test.d2 +# Deconv; +select deconv(s3,s2) from root.test.d2; 
+select deconv(s3,s2,'result'='remainder') from root.test.d2; -# DWT -select dwt(s1,"method"="haar") from root.test.d1 +# DWT; +select dwt(s1,"method"="haar") from root.test.d1; -# FFT -select fft(s1) from root.test.d1 -select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1 +# FFT; +select fft(s1) from root.test.d1; +select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1; -# HighPass -select highpass(s1,'wpass'='0.45') from root.test.d1 +# HighPass; +select highpass(s1,'wpass'='0.45') from root.test.d1; -# IFFT -select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1 +# IFFT; +select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1; -# LowPass -select lowpass(s1,'wpass'='0.45') from root.test.d1 +# LowPass; +select lowpass(s1,'wpass'='0.45') from root.test.d1; -# Envelope -select envelope(s1) from root.test.d1 +# Envelope; +select envelope(s1) from root.test.d1; ``` ### 8.5 数据匹配 @@ -1315,20 +1212,20 @@ select envelope(s1) from root.test.d1 更多见文档[Data-Matching](../SQL-Manual/UDF-Libraries.md#数据匹配) ```sql -# Cov -select cov(s1,s2) from root.test.d2 +# Cov; +select cov(s1,s2) from root.test.d2; -# DTW -select dtw(s1,s2) from root.test.d2 +# DTW; +select dtw(s1,s2) from root.test.d2; -# Pearson -select pearson(s1,s2) from root.test.d2 +# Pearson; +select pearson(s1,s2) from root.test.d2; -# PtnSym -select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1 +# PtnSym; +select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1; -# XCorr -select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 +# XCorr; +select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05; ``` ### 8.6 数据修复 @@ -1336,24 +1233,24 @@ select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 更多见文档[Data-Repairing](../SQL-Manual/UDF-Libraries.md#数据修复) ```sql -# 
TimestampRepair -select timestamprepair(s1,'interval'='10000') from root.test.d2 -select timestamprepair(s1) from root.test.d2 +# TimestampRepair; +select timestamprepair(s1,'interval'='10000') from root.test.d2; +select timestamprepair(s1) from root.test.d2; -# ValueFill -select valuefill(s1) from root.test.d2 -select valuefill(s1,"method"="previous") from root.test.d2 +# ValueFill; +select valuefill(s1) from root.test.d2; +select valuefill(s1,"method"="previous") from root.test.d2; -# ValueRepair -select valuerepair(s1) from root.test.d2 -select valuerepair(s1,'method'='LsGreedy') from root.test.d2 +# ValueRepair; +select valuerepair(s1) from root.test.d2; +select valuerepair(s1,'method'='LsGreedy') from root.test.d2; -# MasterRepair -select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test +# MasterRepair; +select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test; -# SeasonalRepair -select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2 -select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 +# SeasonalRepair; +select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2; +select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2; ``` ### 8.7 序列发现 @@ -1361,12 +1258,12 @@ select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 更多见文档[Series-Discovery](../SQL-Manual/UDF-Libraries.md#序列发现) ```sql -# ConsecutiveSequences -select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1 -select consecutivesequences(s1,s2) from root.test.d1 +# ConsecutiveSequences; +select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1; +select consecutivesequences(s1,s2) from root.test.d1; -# ConsecutiveWindows -select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 +# ConsecutiveWindows; +select consecutivewindows(s1,s2,'length'='10m') from root.test.d1; ``` ### 8.8 机器学习 @@ -1374,14 +1271,14 @@ select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 
更多见文档[Machine-Learning](../SQL-Manual/UDF-Libraries.md#机器学习) ```sql -# AR -select ar(s0,"p"="2") from root.test.d0 +# AR; +select ar(s0,"p"="2") from root.test.d0; -# Representation -select representation(s0,"tb"="3","vb"="2") from root.test.d0 +# Representation; +select representation(s0,"tb"="3","vb"="2") from root.test.d0; -# RM -select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0 +# RM; +select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0; ``` ## 9. 条件表达式 @@ -1394,24 +1291,24 @@ when 1000=1050 then "bad temperature" when P<=1000000 or P>=1100000 then "bad pressure" end as `result` -from root.test1 +from root.test1; select str, case when str like "%cc%" then "has cc" when str like "%dd%" then "has dd" else "no cc and dd" end as `result` -from root.test2 +from root.test2; select count(case when x<=1 then 1 end) as `(-∞,1]`, count(case when 1 SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 配置连续查询的时间窗口大小 ```sql CREATE CONTINUOUS QUERY cq2 - RESAMPLE RANGE 40s - BEGIN - SELECT max_value(temperature) - INTO root.ln.wf02.wt02(temperature_max), root.ln.wf02.wt01(temperature_max), root.ln.wf01.wt02(temperature_max), root.ln.wf01.wt01(temperature_max) - FROM root.ln.*.* - GROUP BY(10s) +END; -END - - -\> SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 同时配置连续查询执行的周期性间隔和时间窗口大小 ```sql CREATE CONTINUOUS QUERY cq3 - RESAMPLE EVERY 20s RANGE 40s - BEGIN - SELECT max_value(temperature) - INTO root.ln.wf02.wt02(temperature_max), root.ln.wf02.wt01(temperature_max), root.ln.wf01.wt02(temperature_max), root.ln.wf01.wt01(temperature_max) - FROM root.ln.*.* - GROUP BY(10s) - FILL(100.0) +END; -END - - - -\> SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 配置连续查询每次查询执行时间窗口的结束时间 ```sql CREATE CONTINUOUS QUERY cq4 - RESAMPLE EVERY 20s RANGE 40s, 20s - BEGIN - SELECT max_value(temperature) - INTO root.ln.wf02.wt02(temperature_max), 
root.ln.wf02.wt01(temperature_max), root.ln.wf01.wt02(temperature_max), root.ln.wf01.wt01(temperature_max) - FROM root.ln.*.* - GROUP BY(10s) - FILL(100.0) +END; -END - - - -\> SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 没有GROUP BY TIME子句的连续查询 ```sql CREATE CONTINUOUS QUERY cq5 - RESAMPLE EVERY 20s - BEGIN - SELECT temperature + 1 - INTO root.precalculated_sg.::(temperature) - FROM root.ln.*.* - align by device +END; -END - - - -\> SELECT temperature from root.precalculated_sg.*.* align by device; +SELECT temperature from root.precalculated_sg.*.* align by device; ``` ### 11.2 连续查询的管理 @@ -1686,18 +1499,12 @@ DROP CONTINUOUS QUERY s1_count_cq; 1. 创建一个连续查询 ```sql CREATE CQ s1_count_cq - BEGIN - -​ SELECT count(s1) - -​ INTO root.sg_count.d.count_s1 - -​ FROM root.sg.d - -​ GROUP BY(30m) - -END + SELECT count(s1) + INTO root.sg_count.d.count_s1 + FROM root.sg.d + GROUP BY(30m) +END; ``` 1. 查询连续查询的结果 ```sql @@ -1717,11 +1524,11 @@ CREATE FUNCTION AS (USING URI URI-STRING)? 
#### 不指定URI ```sql -CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' +CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample'; ``` #### 指定URI ```sql -CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' USING URI 'http://jar/example.jar' +CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' USING URI 'http://jar/example.jar'; ``` ### 12.3 UDF 卸载 @@ -1729,7 +1536,7 @@ CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' USING URI 'http:// DROP FUNCTION ``` ```sql -DROP FUNCTION example +DROP FUNCTION example; ``` ### 12.4 UDF 查询 @@ -1743,16 +1550,13 @@ SELECT example(s1, s2, 'key1'='value1', 'key2'='value2') FROM root.sg.d1; #### 与其他查询的嵌套查询 ```sql SELECT s1, s2, example(s1, s2) FROM root.sg.d1; - SELECT *, example(*) FROM root.sg.d1 DISABLE ALIGN; - SELECT s1 * example(* / s1 + s2) FROM root.sg.d1; - SELECT s1, s2, s1 + example(s1, s2), s1 - example(s1 + example(s1, s2) / s2) FROM root.sg.d1; ``` ### 12.5 查看所有注册的 UDF ```sql -SHOW FUNCTIONS +SHOW FUNCTIONS; ``` ## 13. 
权限管理 @@ -1760,68 +1564,63 @@ SHOW FUNCTIONS - 创建用户(需 MANAGE_USER 权限) - ```SQL -CREATE USER -eg: CREATE USER user1 'passwd' +CREATE USER ; +eg: CREATE USER user1 'passwd'; ``` - 删除用户 (需 MANEGE_USER 权限) - ```SQL -DROP USER -eg: DROP USER user1 +DROP USER ; +eg: DROP USER user1; ``` - 创建角色 (需 MANAGE_ROLE 权限) ```SQL -CREATE ROLE -eg: CREATE ROLE role1 +CREATE ROLE ; +eg: CREATE ROLE role1; ``` - 删除角色 (需 MANAGE_ROLE 权限) - ```SQL -DROP ROLE -eg: DROP ROLE role1 +DROP ROLE ; +eg: DROP ROLE role1; ``` - 赋予用户角色 (需 MANAGE_ROLE 权限) - ```SQL -GRANT ROLE TO -eg: GRANT ROLE admin TO user1 +GRANT ROLE TO ; +eg: GRANT ROLE admin TO user1; ``` - 移除用户角色 (需 MANAGE_ROLE 权限) - ```SQL -REVOKE ROLE FROM -eg: REVOKE ROLE admin FROM user1 +REVOKE ROLE FROM ; +eg: REVOKE ROLE admin FROM user1; ``` - 列出所有用户 (需 MANEGE_USER 权限) ```SQL -LIST USER +LIST USER; ``` - 列出所有角色 (需 MANAGE_ROLE 权限) ```SQL -LIST ROLE +LIST ROLE; ``` - 列出指定角色下所有用户 (需 MANEGE_USER 权限) ```SQL -LIST USER OF ROLE -eg: LIST USER OF ROLE roleuser +LIST USER OF ROLE ; +eg: LIST USER OF ROLE roleuser; ``` - 列出指定用户下所有角色 @@ -1829,8 +1628,8 @@ eg: LIST USER OF ROLE roleuser 用户可以列出自己的角色,但列出其他用户的角色需要拥有 MANAGE_ROLE 权限。 ```SQL -LIST ROLE OF USER -eg: LIST ROLE OF USER tempuser +LIST ROLE OF USER ; +eg: LIST ROLE OF USER tempuser; ``` - 列出用户所有权限 diff --git a/src/zh/UserGuide/latest-Table/Basic-Concept/Table-Management_apache.md b/src/zh/UserGuide/latest-Table/Basic-Concept/Table-Management_apache.md index cff2f64f3..e95978a87 100644 --- a/src/zh/UserGuide/latest-Table/Basic-Concept/Table-Management_apache.md +++ b/src/zh/UserGuide/latest-Table/Basic-Concept/Table-Management_apache.md @@ -76,7 +76,7 @@ comment - 注意:SQL中特殊字符或中文表名需加双引号。原生API中无需额外添加,否则表名会包含引号字符。 - 当为表命名时,最外层的双引号(`""`)不会在实际创建的表名中出现。 - - ```SQL + - ```shell -- SQL 中 "a""b" --> a"b """""" --> "" @@ -133,15 +133,20 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? 
**示例:** -```SQL -IoTDB> show tables from database1 +```sql +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -168,7 +173,9 @@ IoTDB> show tables details from database1 **示例:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -183,8 +190,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -220,7 +230,9 @@ SHOW CREATE TABLE **示例:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -237,12 +249,16 @@ Total line number = 1 **语法:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
column=columnDefinition COMMENT 'column_comment' #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment'; +#dropColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; + +COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **说明:** @@ -254,11 +270,11 @@ ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? col **示例:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 1.6 删除表 @@ -268,12 +284,12 @@ COMMENT ON COLUMN table1.a IS null **语法:** ```SQL -DROP TABLE (IF EXISTS)? +DROP TABLE (IF EXISTS)? 
; ``` **示例:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` \ No newline at end of file diff --git a/src/zh/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md b/src/zh/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md index 32294fb21..201124a9f 100644 --- a/src/zh/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md +++ b/src/zh/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md @@ -76,7 +76,7 @@ comment - 注意:SQL中特殊字符或中文表名需加双引号。原生API中无需额外添加,否则表名会包含引号字符。 - 当为表命名时,最外层的双引号(`""`)不会在实际创建的表名中出现。 - - ```SQL + - ```shell -- SQL 中 "a""b" --> a"b """""" --> "" @@ -134,14 +134,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? **示例:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -168,7 +173,9 @@ IoTDB> show tables details from database1 **示例:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -183,8 +190,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -221,7 +231,9 @@ SHOW CREATE TABLE **示例:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell 
+------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -238,12 +250,15 @@ Total line number = 1 **语法:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment' #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment'; +#dropColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **说明:** @@ -255,11 +270,11 @@ ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
col **示例:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 1.6 删除表 @@ -275,6 +290,6 @@ DROP TABLE (IF EXISTS)? **示例:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` \ No newline at end of file diff --git a/src/zh/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_apache.md b/src/zh/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_apache.md index 46e8a631f..ce5d55fdc 100644 --- a/src/zh/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_apache.md +++ b/src/zh/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_apache.md @@ -110,7 +110,9 @@ try (ITableSession session = 在代码执行完成后,可以通过下述语句确认表已成功创建,其中包含了时间列、标签列、属性列以及测点列等各类信息。 ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +-----------+---------+-----------+ | ColumnName| DataType| Category| +-----------+---------+-----------+ @@ -131,9 +133,9 @@ IoTDB> desc table1 **示例:** ```SQL -INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('北京', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1) +INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('北京', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('北京', '1001', '100', '2025-11-26 13:38:00', 91.0) +INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('北京', '1001', '100', '2025-11-26 13:38:00', 91.0); ``` ### 1.4 空值写入 @@ -143,10 +145,10 @@ INSERT INTO 
table1(region, plant_id, device_id, time, temperature) VALUES ('北 **示例(与上述示例等价):** ```SQL -# 上述部分列写入等价于如下的带空值写入 -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1) +# 上述部分列写入等价于如下的带空值写入; +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null) +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null); ``` 当向不包含任何标签列的表中写入数据时,系统将默认创建一个所有标签列值均为 null 的device。 @@ -163,13 +165,13 @@ INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, tem INSERT INTO table1 VALUES ('2025-11-26 13:37:00', '北京', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('2025-11-26 13:38:00', '北京', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25') +('2025-11-26 13:38:00', '北京', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25'); INSERT INTO table1 (region, plant_id, device_id, model_id, maintenance, time, temperature, humidity, status, arrival_time) VALUES ('北京', '1001', '100', 'A', '180', '2025-11-26 13:37:00', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('北京', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25') +('北京', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25'); ``` #### 注意事项 @@ -194,7 +196,7 @@ INSERT INTO table_name [ ( column [, ... 
] ) ] query 以[示例数据](../Reference/Sample-Data.md)为源数据,先创建目标表 ```SQL -IoTDB:database1> CREATE TABLE target_table ( time TIMESTAMP TIME, region STRING TAG, device_id STRING TAG, temperature FLOAT FIELD ); +CREATE TABLE target_table ( time TIMESTAMP TIME, region STRING TAG, device_id STRING TAG, temperature FLOAT FIELD ); Msg: The statement is executed successfully. ``` @@ -205,9 +207,13 @@ Msg: The statement is executed successfully. 例如:使用标准查询语句,将 table1 中北京地区的 time, region, device\_id, temperature 数据查询写回到 target\_table 中 ```SQL -IoTDB:database1> insert into target_table select time,region,device_id,temperature from table1 where region = '北京' +insert into target_table select time,region,device_id,temperature from table1 where region = '北京'; Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region='北京' +``` +```sql +select * from target_table where region='北京'; +``` +```shell +-----------------------------+------+---------+-----------+ | time|region|device_id|temperature| +-----------------------------+------+---------+-----------+ @@ -232,9 +238,13 @@ It costs 0.029s 例如:使用表引用查询,将 table3 中的数据查询写回到 target\_table 中 ```SQL -IoTDB:database1> insert into target_table(time,device_id,temperature) table table3 +insert into target_table(time,device_id,temperature) table table3; Msg: The statement is executed successfully. 
-IoTDB:database1> select * from target_table where region is null +``` +```sql +select * from target_table where region is null; +``` +```shell +-----------------------------+------+---------+-----------+ | time|region|device_id|temperature| +-----------------------------+------+---------+-----------+ @@ -257,9 +267,13 @@ It costs 0.015s 例如:使用子查询,将 table1 中时间与 table2 上海地区记录匹配的数据的 time, region, device\_id, temperature 查询写回到 target\_table ```SQL -IoTDB:database1> insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = '上海')) +insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = '上海')); Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region = '上海' +``` +```sql +select * from target_table where region = '上海'; +``` +```shell +-----------------------------+------+---------+-----------+ | time|region|device_id|temperature| +-----------------------------+------+---------+-----------+ @@ -313,7 +327,7 @@ insert into tableName(time, columnName) values(timeValue, to_object(isEOF, offse 向表 table1 中增加 object 类型字段 s1 ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS s1 OBJECT FIELD COMMENT 'object类型' +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS s1 OBJECT FIELD COMMENT 'object类型'; ``` 1. 不分段写入 @@ -325,12 +339,12 @@ insert into table1(time, device_id, s1) values(now(), 'tag1', to_object(true, 0, 2. 
分段写入 ```SQL ---分段写入 object 数据 ---第一次写入:to_object(false, 0, X'696F') +--分段写入 object 数据; +--第一次写入:to_object(false, 0, X'696F'); insert into table1(time, device_id, s1) values(1, 'tag1', to_object(false, 0, X'696F')); ---第二次写入:to_object(false, 2, X'7464') +--第二次写入:to_object(false, 2, X'7464'); insert into table1(time, device_id, s1) values(1, 'tag1', to_object(false, 2, X'7464')); ---第三次写入:to_object(true, 4, X'62') +--第三次写入:to_object(true, 4, X'62'); insert into table1(time, device_id, s1) values(1, 'tag1', to_object(true, 4, X'62')); ``` @@ -364,5 +378,5 @@ updateAssignment **示例:** ```SQL -update table1 set b = a where substring(a, 1, 1) like '%' +update table1 set b = a where substring(a, 1, 1) like '%'; ``` \ No newline at end of file diff --git a/src/zh/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md b/src/zh/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md index 64ede20a8..666e07dfc 100644 --- a/src/zh/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md +++ b/src/zh/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md @@ -110,7 +110,9 @@ try (ITableSession session = 在代码执行完成后,可以通过下述语句确认表已成功创建,其中包含了时间列、标签列、属性列以及测点列等各类信息。 ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +-----------+---------+-----------+ | ColumnName| DataType| Category| +-----------+---------+-----------+ @@ -131,9 +133,9 @@ IoTDB> desc table1 **示例:** ```SQL -INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('北京', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1) +INSERT INTO table1(region, plant_id, device_id, time, temperature, humidity) VALUES ('北京', '1001', '100', '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('北京', '1001', '100', '2025-11-26 13:38:00', 91.0) +INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('北京', '1001', '100', '2025-11-26 13:38:00', 91.0); ``` ### 1.4 空值写入 @@ -143,10 +145,10 @@ 
INSERT INTO table1(region, plant_id, device_id, time, temperature) VALUES ('北 **示例(与上述示例等价):** ```SQL -# 上述部分列写入等价于如下的带空值写入 -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1) +# 上述部分列写入等价于如下的带空值写入; +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:37:00', 90.0, 35.1); -INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null) +INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, temperature, humidity) VALUES ('北京', '1001', '100', null, null, '2025-11-26 13:38:00', 91.0, null); ``` 当向不包含任何标签列的表中写入数据时,系统将默认创建一个所有标签列值均为 null 的device。 @@ -163,13 +165,13 @@ INSERT INTO table1(region, plant_id, device_id, model_id, maintenance, time, tem INSERT INTO table1 VALUES ('2025-11-26 13:37:00', '北京', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('2025-11-26 13:38:00', '北京', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25') +('2025-11-26 13:38:00', '北京', '1001', '100', 'A', '180', 90.0, 35.1, true, '2025-11-26 13:38:25'); INSERT INTO table1 (region, plant_id, device_id, model_id, maintenance, time, temperature, humidity, status, arrival_time) VALUES ('北京', '1001', '100', 'A', '180', '2025-11-26 13:37:00', 90.0, 35.1, true, '2025-11-26 13:37:34'), -('北京', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25') +('北京', '1001', '100', 'A', '180', '2025-11-26 13:38:00', 90.0, 35.1, true, '2025-11-26 13:38:25'); ``` #### 注意事项 @@ -205,9 +207,13 @@ Msg: The statement is executed successfully. 
例如:使用标准查询语句,将 table1 中北京地区的 time, region, device\_id, temperature 数据查询写回到 target\_table 中 ```SQL -IoTDB:database1> insert into target_table select time,region,device_id,temperature from table1 where region = '北京' +insert into target_table select time,region,device_id,temperature from table1 where region = '北京'; Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region='北京' +``` +```sql +select * from target_table where region='北京'; +``` +```shell +-----------------------------+------+---------+-----------+ | time|region|device_id|temperature| +-----------------------------+------+---------+-----------+ @@ -232,9 +238,13 @@ It costs 0.029s 例如:使用表引用查询,将 table3 中的数据查询写回到 target\_table 中 ```SQL -IoTDB:database1> insert into target_table(time,device_id,temperature) table table3 +insert into target_table(time,device_id,temperature) table table3; Msg: The statement is executed successfully. -IoTDB:database1> select * from target_table where region is null +``` +```sql +select * from target_table where region is null; +``` +```shell +-----------------------------+------+---------+-----------+ | time|region|device_id|temperature| +-----------------------------+------+---------+-----------+ @@ -257,9 +267,13 @@ It costs 0.015s 例如:使用子查询,将 table1 中时间与 table2 上海地区记录匹配的数据的 time, region, device\_id, temperature 查询写回到 target\_table ```SQL -IoTDB:database1> insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = '上海')) +insert into target_table (select t1.time, t1.region as region, t1.device_id as device_id, t1.temperature as temperature from table1 t1 where t1.time in (select t2.time from table2 t2 where t2.region = '上海')); Msg: The statement is executed successfully. 
-IoTDB:database1> select * from target_table where region = '上海' +``` +```sql +select * from target_table where region = '上海'; +``` +```shell +-----------------------------+------+---------+-----------+ | time|region|device_id|temperature| +-----------------------------+------+---------+-----------+ @@ -314,7 +328,7 @@ insert into tableName(time, columnName) values(timeValue, to_object(isEOF, offse 向表 table1 中增加 object 类型字段 s1 ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS s1 OBJECT FIELD COMMENT 'object类型' +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS s1 OBJECT FIELD COMMENT 'object类型'; ``` 1. 不分段写入 @@ -326,12 +340,12 @@ insert into table1(time, device_id, s1) values(now(), 'tag1', to_object(true, 0, 2. 分段写入 ```SQL ---分段写入 object 数据 ---第一次写入:to_object(false, 0, X'696F') +--分段写入 object 数据; +--第一次写入:to_object(false, 0, X'696F'); insert into table1(time, device_id, s1) values(1, 'tag1', to_object(false, 0, X'696F')); ---第二次写入:to_object(false, 2, X'7464') +--第二次写入:to_object(false, 2, X'7464'); insert into table1(time, device_id, s1) values(1, 'tag1', to_object(false, 2, X'7464')); ---第三次写入:to_object(true, 4, X'62') +--第三次写入:to_object(true, 4, X'62'); insert into table1(time, device_id, s1) values(1, 'tag1', to_object(true, 4, X'62')); ``` @@ -365,5 +379,5 @@ updateAssignment **示例:** ```SQL -update table1 set b = a where substring(a, 1, 1) like '%' +update table1 set b = a where substring(a, 1, 1) like '%'; ``` diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_apache.md b/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_apache.md index 406f3af92..7fd5deafe 100644 --- a/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_apache.md +++ b/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_apache.md @@ -39,7 +39,7 @@ CREATE DATABASE (IF NOT EXISTS)? (WITH properties)? 
CREATE DATABASE database1; CREATE DATABASE IF NOT EXISTS database1; --- 创建一个名为 database1 的数据库,并将数据库的TTL时间设置为1年。 +-- 创建一个名为 database1 的数据库,并将数据库的TTL时间设置为1年; CREATE DATABASE IF NOT EXISTS database1 with(TTL=31536000000); ``` @@ -54,7 +54,7 @@ USE **示例:** ```SQL -USE database1 +USE database1; ``` ### 1.3 查看当前数据库 @@ -62,22 +62,26 @@ USE database1 **语法:** ```SQL -SHOW CURRENT_DATABASE +SHOW CURRENT_DATABASE; ``` **示例:** ```SQL -IoTDB> SHOW CURRENT_DATABASE; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ | null| +---------------+ - -IoTDB> USE database1; - -IoTDB> SHOW CURRENT_DATABASE; +``` +```sql +USE database1; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ @@ -98,15 +102,20 @@ SHOW DATABASES (DETAILS)? **示例:** ```SQL -IoTDB> show databases +show databases; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval| +------------------+-------+-----------------------+---------------------+---------------------+ | database1| INF| 1| 1| 604800000| |information_schema| INF| null| null| null| +------------------+-------+-----------------------+---------------------+---------------------+ - -IoTDB> show databases details +``` +```sql +show databases details; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|DataRegionGroupNum| +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ @@ -120,7 +129,7 @@ IoTDB> show databases details **语法:** ```SQL -ALTER DATABASE (IF EXISTS)? database=identifier SET PROPERTIES propertyAssignments +ALTER DATABASE (IF EXISTS)? 
database=identifier SET PROPERTIES propertyAssignments; ``` **示例:** @@ -134,13 +143,13 @@ ALTER DATABASE database1 SET PROPERTIES TTL=31536000000; **语法:** ```SQL -DROP DATABASE (IF EXISTS)? +DROP DATABASE (IF EXISTS)? ; ``` **示例:** ```SQL -DROP DATABASE IF EXISTS database1 +DROP DATABASE IF EXISTS database1; ``` ## 2. 表管理 @@ -216,14 +225,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? **示例:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -242,7 +256,9 @@ IoTDB> show tables details from database1 **示例:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -257,8 +273,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -286,7 +305,9 @@ SHOW CREATE TABLE **示例:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| 
+------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -301,22 +322,25 @@ Total line number = 1 **语法:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment' #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment'; +#dropColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **示例:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 2.6 删除表 @@ -324,14 +348,14 @@ COMMENT ON COLUMN table1.a IS null **语法:** ```SQL -DROP TABLE (IF EXISTS)? 
+DROP TABLE (IF EXISTS)? ; ``` **示例:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md b/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md index 27e7c1b7a..9efc3a547 100644 --- a/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md +++ b/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md @@ -39,7 +39,7 @@ CREATE DATABASE (IF NOT EXISTS)? (WITH properties)? CREATE DATABASE database1; CREATE DATABASE IF NOT EXISTS database1; --- 创建一个名为 database1 的数据库,并将数据库的TTL时间设置为1年。 +-- 创建一个名为 database1 的数据库,并将数据库的TTL时间设置为1年; CREATE DATABASE IF NOT EXISTS database1 with(TTL=31536000000); ``` @@ -54,7 +54,7 @@ USE **示例:** ```SQL -USE database1 +USE database1; ``` ### 1.3 查看当前数据库 @@ -62,22 +62,26 @@ USE database1 **语法:** ```SQL -SHOW CURRENT_DATABASE +SHOW CURRENT_DATABASE; ``` **示例:** ```SQL -IoTDB> SHOW CURRENT_DATABASE; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ | null| +---------------+ - -IoTDB> USE database1; - -IoTDB> SHOW CURRENT_DATABASE; +``` +```sql +USE database1; +SHOW CURRENT_DATABASE; +``` +```shell +---------------+ |CurrentDatabase| +---------------+ @@ -98,15 +102,20 @@ SHOW DATABASES (DETAILS)? 
**示例:** ```SQL -IoTDB> show databases +show databases; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval| +------------------+-------+-----------------------+---------------------+---------------------+ | database1| INF| 1| 1| 604800000| |information_schema| INF| null| null| null| +------------------+-------+-----------------------+---------------------+---------------------+ - -IoTDB> show databases details +``` +```sql +show databases details; +``` +```shell +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ | Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|DataRegionGroupNum| +------------------+-------+-----------------------+---------------------+---------------------+--------------------+------------------+ @@ -120,7 +129,7 @@ IoTDB> show databases details **语法:** ```SQL -ALTER DATABASE (IF EXISTS)? database=identifier SET PROPERTIES propertyAssignments +ALTER DATABASE (IF EXISTS)? database=identifier SET PROPERTIES propertyAssignments; ``` **示例:** @@ -134,13 +143,13 @@ ALTER DATABASE database1 SET PROPERTIES TTL=31536000000; **语法:** ```SQL -DROP DATABASE (IF EXISTS)? +DROP DATABASE (IF EXISTS)? ; ``` **示例:** ```SQL -DROP DATABASE IF EXISTS database1 +DROP DATABASE IF EXISTS database1; ``` ## 2. 表管理 @@ -216,14 +225,19 @@ SHOW TABLES (DETAILS)? ((FROM | IN) database_name)? 
**示例:** ```SQL -IoTDB> show tables from database1 +show tables from database1; +``` +```shell +---------+---------------+ |TableName| TTL(ms)| +---------+---------------+ | table1| 31536000000| +---------+---------------+ - -IoTDB> show tables details from database1 +``` +```sql +show tables details from database1; +``` +```shell +---------------+-----------+------+-------+ | TableName| TTL(ms)|Status|Comment| +---------------+-----------+------+-------+ @@ -242,7 +256,9 @@ IoTDB> show tables details from database1 **示例:** ```SQL -IoTDB> desc table1 +desc table1; +``` +```shell +------------+---------+---------+ | ColumnName| DataType| Category| +------------+---------+---------+ @@ -257,8 +273,11 @@ IoTDB> desc table1 | status| BOOLEAN| FIELD| |arrival_time|TIMESTAMP| FIELD| +------------+---------+---------+ - -IoTDB> desc table1 details +``` +```sql +desc table1 details; +``` +```shell +------------+---------+---------+------+------------+ | ColumnName| DataType| Category|Status| Comment| +------------+---------+---------+------+------------+ @@ -286,7 +305,9 @@ SHOW CREATE TABLE **示例:** ```SQL -IoTDB:database1> show create table table1 +show create table table1; +``` +```shell +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table| Create Table| +------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -301,22 +322,25 @@ Total line number = 1 **语法:** ```SQL -ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? 
column=columnDefinition COMMENT 'column_comment' #addColumn -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier #dropColumn -// set TTL can use this -| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments #setTableProperties -| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment' -| COMMENT ON COLUMN tableName.column IS 'column_comment' +#addColumn; +ALTER TABLE (IF EXISTS)? tableName=qualifiedName ADD COLUMN (IF NOT EXISTS)? column=columnDefinition COMMENT 'column_comment'; +#dropColumn; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName DROP COLUMN (IF EXISTS)? column=identifier; +#setTableProperties; +// set TTL can use this; +| ALTER TABLE (IF EXISTS)? tableName=qualifiedName SET PROPERTIES propertyAssignments; +| COMMENT ON TABLE tableName=qualifiedName IS 'table_comment'; +| COMMENT ON COLUMN tableName.column IS 'column_comment'; ``` **示例:** ```SQL -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a' -ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b' -ALTER TABLE table1 set properties TTL=3600 -COMMENT ON TABLE table1 IS 'table1' -COMMENT ON COLUMN table1.a IS null +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS a TAG COMMENT 'a'; +ALTER TABLE table1 ADD COLUMN IF NOT EXISTS b FLOAT FIELD COMMENT 'b'; +ALTER TABLE table1 set properties TTL=3600; +COMMENT ON TABLE table1 IS 'table1'; +COMMENT ON COLUMN table1.a IS null; ``` ### 2.6 删除表 @@ -330,8 +354,8 @@ DROP TABLE (IF EXISTS)? 
**示例:** ```SQL -DROP TABLE table1 -DROP TABLE database1.table1 +DROP TABLE table1; +DROP TABLE database1.table1; ``` diff --git a/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_apache.md b/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_apache.md index 41ad6d179..720669e64 100644 --- a/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_apache.md +++ b/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_apache.md @@ -29,16 +29,16 @@ 我们可以根据存储模型建立相应的数据库。如下所示: -``` -IoTDB > CREATE DATABASE root.ln +```sql +CREATE DATABASE root.ln; ``` 需要注意的是,推荐创建一个 database. Database 的父子节点都不能再设置 database。例如在已经有`root.ln`和`root.sgcc`这两个 database 的情况下,创建`root.ln.wf01` database 是不可行的。系统将给出相应的错误提示,如下所示: -``` -IoTDB> CREATE DATABASE root.ln.wf01 +```sql +CREATE DATABASE root.ln.wf01; Msg: 300: root.ln has already been created as database. ``` Database 节点名命名规则: @@ -56,15 +56,15 @@ Database 节点名命名规则: 在 database 创建后,我们可以使用 [SHOW DATABASES](../SQL-Manual/SQL-Manual.md#查看数据库) 语句和 [SHOW DATABASES \](../SQL-Manual/SQL-Manual.md#查看数据库) 来查看 database,SQL 语句如下所示: -``` -IoTDB> show databases -IoTDB> show databases root.* -IoTDB> show databases root.** +```sql +show databases; +show databases root.*; +show databases root.**; ``` 执行结果为: -``` +```shell +-------------+----+-------------------------+-----------------------+-----------------------+ | database| ttl|schema_replication_factor|data_replication_factor|time_partition_interval| +-------------+----+-------------------------+-----------------------+-----------------------+ @@ -79,11 +79,11 @@ It costs 0.060s 用户可以使用`DELETE DATABASE `语句删除该路径模式匹配的所有的数据库。在删除的过程中,需要注意的是数据库的数据也会被删除。 -``` -IoTDB > DELETE DATABASE root.ln -IoTDB > DELETE DATABASE root.sgcc -// 删除所有数据,时间序列以及数据库 -IoTDB > DELETE DATABASE root.** +```sql +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +// 删除所有数据,时间序列以及数据库; +DELETE DATABASE root.**; ``` ### 1.4 统计数据库数量 @@ -92,17 +92,17 @@ IoTDB > DELETE DATABASE root.** SQL 语句如下所示: -``` -IoTDB> show databases -IoTDB> 
count databases -IoTDB> count databases root.* -IoTDB> count databases root.sgcc.* -IoTDB> count databases root.sgcc +```sql +show databases; +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` 执行结果为: -``` +```shell +-------------+ | database| +-------------+ @@ -160,7 +160,7 @@ TTL的默认单位为毫秒,如果配置文件中的时间精度修改为其 #### TTL Path 规则 设置的路径 path 只支持前缀路径(即路径中间不能带 \* , 且必须以 \*\* 结尾),该路径会匹配到设备,也允许用户指定不带星的 path 为具体的 database 或 device,当 path 不带 \* 时,会检查是否匹配到 database,若匹配到 database,则会同时设置 path 和 path.\*\*。 注意:设备 TTL 设置不会对元数据的存在性进行校验,即允许对一条不存在的设备设置 TTL。 -``` +```shell 合格的 path: root.** root.db.** @@ -178,7 +178,7 @@ root.db.* #### 设置 TTL set ttl 操作可以理解为设置一条 TTL规则,比如 set ttl to root.sg.group1.\*\* 就相当于对所有可以匹配到该路径模式的设备挂载 ttl。 unset ttl 操作表示对相应路径模式卸载 TTL,若不存在对应 TTL,则不做任何事。若想把 TTL 调成无限大,则可以使用 INF 关键字 设置 TTL 的 SQL 语句如下所示: -``` +```sql set ttl to pathPattern 360000; ``` pathPattern 是前缀路径,即路径中间不能带 \* 且必须以 \*\* 结尾。 @@ -190,30 +190,30 @@ pathPattern 匹配对应的设备。为了兼容老版本 SQL 语法,允许用 取消 TTL 的 SQL 语句如下所示: -``` -IoTDB> unset ttl from root.ln +```sql +unset ttl from root.ln; ``` 取消设置 TTL 后, `root.ln` 路径下所有的数据都会被保存。 -``` -IoTDB> unset ttl from root.sgcc.** +```sql +unset ttl from root.sgcc.**; ``` 取消设置`root.sgcc`路径下的所有的 TTL 。 -``` -IoTDB> unset ttl from root.** +```sql +unset ttl from root.**; ``` 取消设置所有的 TTL 。 新语法 -``` -IoTDB> unset ttl from root.** +```sql +unset ttl from root.**; ``` 旧语法 -``` -IoTDB> unset ttl to root.** +```sql +unset ttl to root.**; ``` 新旧语法在功能上没有区别并且同时兼容,仅是新语法在用词上更符合常规。 #### 显示 TTL @@ -221,8 +221,10 @@ IoTDB> unset ttl to root.** 显示 TTL 的 SQL 语句如下所示: show all ttl +```sql +SHOW ALL TTL; ``` -IoTDB> SHOW ALL TTL +```shell +--------------+--------+ | path| TTL| | root.**|55555555| @@ -230,9 +232,12 @@ IoTDB> SHOW ALL TTL +--------------+--------+ ``` -show ttl on pathPattern +show ttl on pathPattern; + +```sql +SHOW TTL ON root.db.**; ``` -IoTDB> SHOW TTL ON root.db.**; +```shell +--------------+--------+ | path| TTL| | 
root.db.**|55555555| @@ -243,8 +248,10 @@ SHOW ALL TTL 这个例子会给出所有的 TTL。 SHOW TTL ON pathPattern 这个例子会显示指定路径的 TTL。 显示设备的 TTL。 +```sql +show devices; ``` -IoTDB> show devices +```shell +---------------+---------+---------+ | Device|IsAligned| TTL| +---------------+---------+---------+ @@ -283,12 +290,12 @@ IoTDB> show devices 用户可以在创建 Database 时设置上述任意异构参数,SQL 语句如下所示: -``` +```sql CREATE DATABASE prefixPath (WITH databaseAttributeClause (COMMA? databaseAttributeClause)*)? ``` 例如: -``` +```sql CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTOR=3, SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -296,12 +303,12 @@ CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTO 用户可以在 IoTDB 运行时调整部分异构参数,SQL 语句如下所示: -``` +```sql ALTER DATABASE prefixPath WITH databaseAttributeClause (COMMA? databaseAttributeClause)* ``` 例如: -``` +```sql ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -313,14 +320,16 @@ ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; 用户可以查询每个 Database 的具体异构配置,SQL 语句如下所示: -``` +```sql SHOW DATABASES DETAILS prefixPath? ``` 例如: +```sql +SHOW DATABASES DETAILS; ``` -IoTDB> SHOW DATABASES DETAILS +```shell +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ |Database| TTL|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|MinSchemaRegionGroupNum|MaxSchemaRegionGroupNum|DataRegionGroupNum|MinDataRegionGroupNum|MaxDataRegionGroupNum| +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ @@ -365,21 +374,21 @@ CREATE DEVICE TEMPLATE ALIGNED? 
'(' create device template t1 (temperature FLOAT, status BOOLEAN) +```sql +create device template t1 (temperature FLOAT, status BOOLEAN); ``` **示例2:** 创建包含一组对齐序列的元数据模板 -```shell -IoTDB> create device template t2 aligned (lat FLOAT, lon FLOAT) +```sql +create device template t2 aligned (lat FLOAT, lon FLOAT); ``` 其中,物理量 `lat` 和 `lon` 是对齐的。 创建模板时,系统会默认指定编码压缩方式,无需手动指定,若业务场景需要手动调整,可参考如下示例: -```shell -IoTDB> create device template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) +```sql +create device template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY); ``` 更多详细的数据类型与编码方式的对应列表请参见 [压缩&编码](../Technical-Insider/Encoding-and-Compression.md)。 @@ -395,8 +404,8 @@ IoTDB> create device template t1 (temperature FLOAT encoding=RLE, status BOOLEAN 挂载元数据模板的 SQL 语句如下所示: -```shell -IoTDB> set device template t1 to root.sg1.d1 +```sql +set device template t1 to root.sg1.d1; ``` ### 2.3 激活设备模板 @@ -405,21 +414,21 @@ IoTDB> set device template t1 to root.sg1.d1 **注意**:在插入数据之前或系统未开启自动注册序列功能,模板定义的时间序列不会被创建。可以使用如下SQL语句在插入数据前创建时间序列即激活模板: -```shell -IoTDB> create timeseries using device template on root.sg1.d1 +```sql +create timeseries using device template on root.sg1.d1; ``` **示例:** 执行以下语句 -```shell -IoTDB> set device template t1 to root.sg1.d1 -IoTDB> set device template t2 to root.sg1.d2 -IoTDB> create timeseries using device template on root.sg1.d1 -IoTDB> create timeseries using device template on root.sg1.d2 +```sql +set device template t1 to root.sg1.d1; +set device template t2 to root.sg1.d2; +create timeseries using device template on root.sg1.d1; +create timeseries using device template on root.sg1.d2; ``` 查看此时的时间序列: ```sql -show timeseries root.sg1.** +show timeseries root.sg1.**; ``` ```shell @@ -435,7 +444,7 @@ show timeseries root.sg1.** 查看此时的设备: ```sql -show devices root.sg1.** +show devices root.sg1.**; ``` ```shell @@ -453,8 +462,8 @@ show devices root.sg1.** SQL 语句如下所示: -```shell -IoTDB> show 
device templates +```sql +show device templates; ``` 执行结果如下: @@ -471,8 +480,8 @@ IoTDB> show device templates SQL 语句如下所示: -```shell -IoTDB> show nodes in device template t1 +```sql +show nodes in device template t1; ``` 执行结果如下: @@ -487,8 +496,8 @@ IoTDB> show nodes in device template t1 - 查看挂载了某个设备模板的路径 -```shell -IoTDB> show paths set device template t1 +```sql +show paths set device template t1; ``` 执行结果如下: @@ -502,8 +511,8 @@ IoTDB> show paths set device template t1 - 查看使用了某个设备模板的路径(即模板在该路径上已激活,序列已创建) -```shell -IoTDB> show paths using device template t1 +```sql +show paths using device template t1; ``` 执行结果如下: @@ -519,26 +528,26 @@ IoTDB> show paths using device template t1 若需删除模板表示的某一组时间序列,可采用解除模板操作,SQL语句如下所示: -```shell -IoTDB> delete timeseries of device template t1 from root.sg1.d1 +```sql +delete timeseries of device template t1 from root.sg1.d1; ``` 或 -```shell -IoTDB> deactivate device template t1 from root.sg1.d1 +```sql +deactivate device template t1 from root.sg1.d1; ``` 解除操作支持批量处理,SQL语句如下所示: -```shell -IoTDB> delete timeseries of device template t1 from root.sg1.*, root.sg2.* +```sql +delete timeseries of device template t1 from root.sg1.*, root.sg2.*; ``` 或 -```shell -IoTDB> deactivate device template t1 from root.sg1.*, root.sg2.* +```sql +deactivate device template t1 from root.sg1.*, root.sg2.*; ``` 若解除命令不指定模板名称,则会将给定路径涉及的所有模板使用情况均解除。 @@ -547,8 +556,8 @@ IoTDB> deactivate device template t1 from root.sg1.*, root.sg2.* 卸载设备模板的 SQL 语句如下所示: -```shell -IoTDB> unset device template t1 from root.sg1.d1 +```sql +unset device template t1 from root.sg1.d1; ``` **注意**:不支持卸载仍处于激活状态的模板,需保证执行卸载操作前解除对该模板的所有使用,即删除所有该模板表示的序列。 @@ -557,8 +566,8 @@ IoTDB> unset device template t1 from root.sg1.d1 删除设备模板的 SQL 语句如下所示: -```shell -IoTDB> drop device template t1 +```sql +drop device template t1; ``` **注意**:不支持删除已经挂载的模板,需在删除操作前保证该模板卸载成功。 @@ -569,8 +578,8 @@ IoTDB> drop device template t1 修改设备模板的 SQL 语句如下所示: -```shell -IoTDB> alter device template t1 add (speed FLOAT) 
+```sql +alter device template t1 add (speed FLOAT); ``` **向已挂载模板的路径下的设备中写入数据,若写入请求中的物理量不在模板中,将自动扩展模板。** @@ -582,34 +591,34 @@ IoTDB> alter device template t1 add (speed FLOAT) 根据建立的数据模型,我们可以分别在两个数据库中创建相应的时间序列。创建时间序列的 SQL 语句如下所示: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +```sql +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` 从 v0.13 起,可以使用简化版的 SQL 语句创建时间序列: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT +```sql +create timeseries root.ln.wf01.wt01.status BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature FLOAT; +create timeseries root.ln.wf02.wt02.hardware TEXT; +create timeseries root.ln.wf02.wt02.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature FLOAT; ``` 创建时间序列时,系统会默认指定编码压缩方式,无需手动指定,若业务场景需要手动调整,可参考如下示例: -``` -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN 
compressor=SNAPPY +```sql +create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY; ``` 需要注意的是,如果手动指定了编码方式,但与数据类型不对应时,系统会给出相应的错误提示,如下所示: -``` -IoTDB> create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF +```sql +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF; error: encoding TS_2DIFF does not support BOOLEAN ``` @@ -619,8 +628,8 @@ error: encoding TS_2DIFF does not support BOOLEAN 创建一组对齐时间序列的SQL语句如下所示: -``` -IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) +```sql +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ``` 一组对齐序列中的序列可以有不同的数据类型、编码方式以及压缩方式。 @@ -631,11 +640,11 @@ IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOA 我们可以使用`(DELETE | DROP) TimeSeries `语句来删除我们之前创建的时间序列。SQL 语句如下所示: -``` -IoTDB> delete timeseries root.ln.wf01.wt01.status -IoTDB> delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -IoTDB> delete timeseries root.ln.wf02.* -IoTDB> drop timeseries root.ln.wf02.* +```sql +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` ### 3.4 查看时间序列 @@ -656,14 +665,14 @@ IoTDB> drop timeseries root.ln.wf02.* 返回给定路径的下的所有时间序列信息。其中 `Path` 需要为一个时间序列路径或路径模式。例如,分别查看`root`路径和`root.ln`路径下的时间序列,SQL 语句如下所示: -``` -IoTDB> show timeseries root.** -IoTDB> show timeseries root.ln.** +```sql +show timeseries root.**; +show timeseries root.ln.**; ``` 执行结果分别为: -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| 
+-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -694,21 +703,21 @@ It costs 0.004s 只返回从指定下标开始的结果,最大返回条数被 LIMIT 限制,用于分页查询。例如: -``` -show timeseries root.ln.** limit 10 offset 10 +```sql +show timeseries root.ln.** limit 10 offset 10; ``` * SHOW TIMESERIES WHERE TIMESERIES contains 'containStr' 对查询结果集根据 timeseries 名称进行字符串模糊匹配过滤。例如: -``` -show timeseries root.ln.** where timeseries contains 'wf01.wt' +```sql +show timeseries root.ln.** where timeseries contains 'wf01.wt'; ``` 执行结果为: -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -723,13 +732,13 @@ It costs 0.016s 对查询结果集根据时间序列数据类型进行过滤。例如: -``` -show timeseries root.ln.** where dataType=FLOAT +```sql +show timeseries root.ln.** where dataType=FLOAT; ``` 执行结果为: -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -748,14 +757,14 @@ It costs 0.016s 
对查询结果集根据标签进行过滤。例如: -``` -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` 执行结果分别为: -``` +```shell +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags|attributes|deadband|deadband parameters| +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ @@ -771,7 +780,6 @@ It costs 0.005s +------------------------+-----+-------------+--------+--------+-----------+-----------------------+----------+--------+-------------------+ Total line number = 1 It costs 0.004s - ``` * SHOW LATEST TIMESERIES @@ -790,21 +798,21 @@ IoTDB 支持使用`COUNT TIMESERIES`来统计一条路径中的时间序 * 可以通过 `WHERE` 条件对标签点进行过滤,语法为: `COUNT TIMESERIES WHERE TAGS(key)='value'` 或 `COUNT TIMESERIES WHERE TAGS(key) contains 'value'`。 * 可以通过定义`LEVEL`来统计指定层级下的时间序列个数。这条语句可以用来统计每一个设备下的传感器数量,语法为:`COUNT TIMESERIES GROUP BY LEVEL=`。 -``` -IoTDB > COUNT TIMESERIES root.** -IoTDB > COUNT TIMESERIES root.ln.** -IoTDB > COUNT TIMESERIES root.ln.*.*.status -IoTDB > COUNT TIMESERIES root.ln.wf01.wt01.status -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -IoTDB > COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 +```sql +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc'; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE 
TAGS(unit) contains 'c' ; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' ; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; ``` 例如有如下时间序列(可以使用`show timeseries`展示所有时间序列): -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -826,16 +834,20 @@ It costs 0.004s 可以看到,`root`被定义为`LEVEL=0`。那么当你输入如下语句时: -``` -IoTDB > COUNT TIMESERIES root.** GROUP BY LEVEL=1 -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +```sql +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` 你将得到以下结果: +```sql +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` -IoTDB> COUNT TIMESERIES root.** GROUP BY LEVEL=1 +```shell +------------+-----------------+ | column|count(timeseries)| +------------+-----------------+ @@ -846,7 +858,6 @@ IoTDB> COUNT TIMESERIES root.** GROUP BY LEVEL=1 Total line number = 3 It costs 0.002s -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 +------------+-----------------+ | column|count(timeseries)| +------------+-----------------+ @@ -856,7 +867,6 @@ IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 Total line number = 2 It costs 0.002s -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +------------+-----------------+ | column|count(timeseries)| +------------+-----------------+ @@ 
-878,8 +888,8 @@ It costs 0.002s * 属性只能用时间序列路径来查询:时间序列路径 -> 属性 所用到的扩展的创建时间序列的 SQL 语句如下所示: -``` -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +```sql +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` 括号里的`temprature`是`s1`这个传感器的别名。 @@ -892,48 +902,48 @@ create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v * 标签点属性更新 创建时间序列后,我们也可以对其原有的标签点属性进行更新,主要有以下六种更新方式: * 重命名标签或属性 -``` -ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +```sql +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1; ``` * 重新设置标签或属性的值 -``` -ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +```sql +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1; ``` * 删除已经存在的标签或属性 -``` -ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +```sql +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2; ``` * 添加新的标签 -``` -ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +```sql +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4; ``` * 添加新的属性 -``` -ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +```sql +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4; ``` * 更新插入别名,标签和属性 > 如果该别名,标签或属性原来不存在,则插入,否则,用新值更新原来的旧值 -``` -ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4) +```sql +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4); ``` * 使用标签作为过滤条件查询时间序列,使用 TAGS(tagKey) 来标识作为过滤条件的标签 -``` +```sql SHOW TIMESERIES (<`PathPattern`>)? 
timeseriesWhereClause ``` 返回给定路径的下的所有满足条件的时间序列信息,SQL 语句如下所示: -``` -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` 执行结果分别为: -``` +```shell +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags|attributes|deadband|deadband parameters| +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ @@ -953,23 +963,22 @@ It costs 0.004s - 使用标签作为过滤条件统计时间序列数量 -``` -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +```sql +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? 
timeseriesWhereClause GROUP BY LEVEL=; ``` 返回给定路径的下的所有满足条件的时间序列的数量,SQL 语句如下所示: -``` -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +```sql +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` 执行结果分别为: -``` -IoTDB> count timeseries +```shell +-----------------+ |count(timeseries)| +-----------------+ @@ -977,7 +986,7 @@ IoTDB> count timeseries +-----------------+ Total line number = 1 It costs 0.019s -IoTDB> count timeseries root.** where TAGS(unit)='c' + +-----------------+ |count(timeseries)| +-----------------+ @@ -985,7 +994,7 @@ IoTDB> count timeseries root.** where TAGS(unit)='c' +-----------------+ Total line number = 1 It costs 0.020s -IoTDB> count timeseries root.** where TAGS(unit)='c' group by level = 2 + +--------------+-----------------+ | column|count(timeseries)| +--------------+-----------------+ @@ -1001,14 +1010,16 @@ It costs 0.011s 创建对齐时间序列 -``` -create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) +```sql +create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)); ``` 执行结果如下: +```sql +show timeseries; ``` -IoTDB> show timeseries +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -1019,8 +1030,10 @@ IoTDB> show timeseries 支持查询: +```sql +show timeseries where TAGS(tag1)='v1' ``` -IoTDB> show 
timeseries where TAGS(tag1)='v1' +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -1085,7 +1098,7 @@ IoTDB> show timeseries where TAGS(tag1)='v1' ### 4.5 查看路径的所有子路径 -``` +```sql SHOW CHILD PATHS pathPattern ``` @@ -1097,7 +1110,7 @@ SHOW CHILD PATHS pathPattern * 查询 root.ln 的下一层:show child paths root.ln -``` +```shell +------------+----------+ | child paths|node types| +------------+----------+ @@ -1110,7 +1123,7 @@ It costs 0.002s * 查询形如 root.xx.xx.xx 的路径:show child paths root.\*.\* -``` +```shell +---------------+ | child paths| +---------------+ @@ -1121,8 +1134,8 @@ It costs 0.002s ### 4.6 查看路径的下一级节点 -``` -SHOW CHILD NODES pathPattern +```sql +SHOW CHILD NODES pathPattern; ``` 可以查看此路径模式所匹配的节点的下一层的所有节点。 @@ -1131,7 +1144,7 @@ SHOW CHILD NODES pathPattern * 查询 root 的下一层:show child nodes root -``` +```shell +------------+ | child nodes| +------------+ @@ -1141,7 +1154,7 @@ SHOW CHILD NODES pathPattern * 查询 root.ln 的下一层 :show child nodes root.ln -``` +```shell +------------+ | child nodes| +------------+ @@ -1155,16 +1168,16 @@ SHOW CHILD NODES pathPattern IoTDB 支持使用`COUNT NODES LEVEL=`来统计当前 Metadata 树下满足某路径模式的路径中指定层级的节点个数。这条语句可以用来统计带有特定采样点的设备数。例如: -``` -IoTDB > COUNT NODES root.** LEVEL=2 -IoTDB > COUNT NODES root.ln.** LEVEL=2 -IoTDB > COUNT NODES root.ln.wf01.* LEVEL=3 -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +```sql +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.* LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` 对于上面提到的例子和 Metadata Tree,你可以获得如下结果: -``` +```shell +------------+ |count(nodes)| +------------+ @@ -1214,19 +1227,19 @@ It costs 
0.002s SQL 语句如下所示: -``` -IoTDB> show devices -IoTDB> show devices root.ln.** -IoTDB> show devices root.ln.** where device contains 't' -IoTDB> show devices root.ln.** where template = 't1' -IoTDB> show devices root.ln.** where template is null -IoTDB> show devices root.ln.** where template != 't1' -IoTDB> show devices root.ln.** where template is not null +```sql +show devices; +show devices root.ln.**; +show devices root.ln.** where device contains 't'; +show devices root.ln.** where template = 't1'; +show devices root.ln.** where template is null; +show devices root.ln.** where template != 't1'; +show devices root.ln.** where template is not null; ``` 你可以获得如下数据: -``` +```shell +-------------------+---------+---------+ | devices|isAligned| Template| +-------------------+---------+---------+ @@ -1283,14 +1296,14 @@ It costs 0.001s SQL 语句如下所示: -``` -IoTDB> show devices with database -IoTDB> show devices root.ln.** with database +```sql +show devices with database; +show devices root.ln.** with database; ``` 你可以获得如下数据: -``` +```shell +-------------------+-------------+---------+---------+ | devices| database|isAligned| Template| +-------------------+-------------+---------+---------+ @@ -1320,15 +1333,15 @@ It costs 0.001s SQL 语句如下所示: -``` -IoTDB> show devices -IoTDB> count devices -IoTDB> count devices root.ln.** +```sql +show devices; +count devices; +count devices root.ln.**; ``` 你可以获得如下数据: -``` +```shell +-------------------+---------+---------+ | devices|isAligned| Template| +-------------------+---------+---------+ diff --git a/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md b/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md index 529aab548..75e6abc26 100644 --- a/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md +++ b/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md @@ -29,16 +29,16 @@ 我们可以根据存储模型建立相应的数据库。如下所示: -``` -IoTDB > CREATE DATABASE root.ln +```sql +CREATE DATABASE root.ln; ``` 
需要注意的是,推荐创建一个 database. Database 的父子节点都不能再设置 database。例如在已经有`root.ln`和`root.sgcc`这两个 database 的情况下,创建`root.ln.wf01` database 是不可行的。系统将给出相应的错误提示,如下所示: -``` -IoTDB> CREATE DATABASE root.ln.wf01 +```sql +CREATE DATABASE root.ln.wf01; Msg: 300: root.ln has already been created as database. ``` Database 节点名命名规则: @@ -55,15 +55,15 @@ Database 节点名命名规则: 在 database 创建后,我们可以使用 [SHOW DATABASES](../SQL-Manual/SQL-Manual.md#查看数据库) 语句和 [SHOW DATABASES \](../SQL-Manual/SQL-Manual.md#查看数据库) 来查看 database,SQL 语句如下所示: -``` -IoTDB> show databases -IoTDB> show databases root.* -IoTDB> show databases root.** +```sql +show databases; +show databases root.*; +show databases root.**; ``` 执行结果为: -``` +```shell +-------------+----+-------------------------+-----------------------+-----------------------+ | database| ttl|schema_replication_factor|data_replication_factor|time_partition_interval| +-------------+----+-------------------------+-----------------------+-----------------------+ @@ -78,11 +78,11 @@ It costs 0.060s 用户可以使用`DELETE DATABASE `语句删除该路径模式匹配的所有的数据库。在删除的过程中,需要注意的是数据库的数据也会被删除。 -``` -IoTDB > DELETE DATABASE root.ln -IoTDB > DELETE DATABASE root.sgcc -// 删除所有数据,时间序列以及数据库 -IoTDB > DELETE DATABASE root.** +```sql +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +// 删除所有数据,时间序列以及数据库; +DELETE DATABASE root.**; ``` ### 1.4 统计数据库数量 @@ -91,17 +91,17 @@ IoTDB > DELETE DATABASE root.** SQL 语句如下所示: -``` -IoTDB> show databases -IoTDB> count databases -IoTDB> count databases root.* -IoTDB> count databases root.sgcc.* -IoTDB> count databases root.sgcc +```sql +show databases; +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` 执行结果为: -``` +```shell +-------------+ | database| +-------------+ @@ -159,7 +159,7 @@ TTL的默认单位为毫秒,如果配置文件中的时间精度修改为其 #### TTL Path 规则 设置的路径 path 只支持前缀路径(即路径中间不能带 \* , 且必须以 \*\* 结尾),该路径会匹配到设备,也允许用户指定不带星的 path 为具体的 database 或 device,当 path 不带 \* 时,会检查是否匹配到 database,若匹配到 database,则会同时设置 path 和 path.\*\*。 注意:设备 TTL 
设置不会对元数据的存在性进行校验,即允许对一条不存在的设备设置 TTL。 -``` +```shell 合格的 path: root.** root.db.** @@ -177,7 +177,7 @@ root.db.* #### 设置 TTL set ttl 操作可以理解为设置一条 TTL规则,比如 set ttl to root.sg.group1.\*\* 就相当于对所有可以匹配到该路径模式的设备挂载 ttl。 unset ttl 操作表示对相应路径模式卸载 TTL,若不存在对应 TTL,则不做任何事。若想把 TTL 调成无限大,则可以使用 INF 关键字 设置 TTL 的 SQL 语句如下所示: -``` +```sql set ttl to pathPattern 360000; ``` pathPattern 是前缀路径,即路径中间不能带 \* 且必须以 \*\* 结尾。 @@ -189,30 +189,30 @@ pathPattern 匹配对应的设备。为了兼容老版本 SQL 语法,允许用 取消 TTL 的 SQL 语句如下所示: -``` -IoTDB> unset ttl from root.ln +```sql +unset ttl from root.ln; ``` 取消设置 TTL 后, `root.ln` 路径下所有的数据都会被保存。 -``` -IoTDB> unset ttl from root.sgcc.** +```sql +unset ttl from root.sgcc.**; ``` 取消设置`root.sgcc`路径下的所有的 TTL 。 -``` -IoTDB> unset ttl from root.** +```sql +unset ttl from root.**; ``` 取消设置所有的 TTL 。 新语法 -``` -IoTDB> unset ttl from root.** +```sql +unset ttl from root.**; ``` 旧语法 -``` -IoTDB> unset ttl to root.** +```sql +unset ttl to root.**; ``` 新旧语法在功能上没有区别并且同时兼容,仅是新语法在用词上更符合常规。 #### 显示 TTL @@ -220,8 +220,10 @@ IoTDB> unset ttl to root.** 显示 TTL 的 SQL 语句如下所示: show all ttl +```sql +SHOW ALL TTL; ``` -IoTDB> SHOW ALL TTL +```shell +--------------+--------+ | path| TTL| | root.**|55555555| @@ -230,8 +232,10 @@ IoTDB> SHOW ALL TTL ``` show ttl on pathPattern +```sql +SHOW TTL ON root.db.**; ``` -IoTDB> SHOW TTL ON root.db.**; +```shell +--------------+--------+ | path| TTL| | root.db.**|55555555| @@ -242,8 +246,10 @@ SHOW ALL TTL 这个例子会给出所有的 TTL。 SHOW TTL ON pathPattern 这个例子会显示指定路径的 TTL。 显示设备的 TTL。 +```sql +show devices; ``` -IoTDB> show devices +```shell +---------------+---------+---------+ | Device|IsAligned| TTL| +---------------+---------+---------+ @@ -282,12 +288,12 @@ IoTDB> show devices 用户可以在创建 Database 时设置上述任意异构参数,SQL 语句如下所示: -``` +```sql CREATE DATABASE prefixPath (WITH databaseAttributeClause (COMMA? databaseAttributeClause)*)? 
``` 例如: -``` +```sql CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTOR=3, SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -295,12 +301,12 @@ CREATE DATABASE root.db WITH SCHEMA_REPLICATION_FACTOR=1, DATA_REPLICATION_FACTO 用户可以在 IoTDB 运行时调整部分异构参数,SQL 语句如下所示: -``` +```sql ALTER DATABASE prefixPath WITH databaseAttributeClause (COMMA? databaseAttributeClause)* ``` 例如: -``` +```sql ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; ``` @@ -312,14 +318,16 @@ ALTER DATABASE root.db WITH SCHEMA_REGION_GROUP_NUM=1, DATA_REGION_GROUP_NUM=2; 用户可以查询每个 Database 的具体异构配置,SQL 语句如下所示: -``` +```sql SHOW DATABASES DETAILS prefixPath? ``` 例如: +```sql +SHOW DATABASES DETAILS; ``` -IoTDB> SHOW DATABASES DETAILS +```shell +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ |Database| TTL|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum|MinSchemaRegionGroupNum|MaxSchemaRegionGroupNum|DataRegionGroupNum|MinDataRegionGroupNum|MaxDataRegionGroupNum| +--------+--------+-----------------------+---------------------+---------------------+--------------------+-----------------------+-----------------------+------------------+---------------------+---------------------+ @@ -351,34 +359,34 @@ It costs 0.058s 根据建立的数据模型,我们可以分别在两个数据库中创建相应的时间序列。创建时间序列的 SQL 语句如下所示: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +```sql 
+create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` 从 v0.13 起,可以使用简化版的 SQL 语句创建时间序列: -``` -IoTDB > create timeseries root.ln.wf01.wt01.status BOOLEAN -IoTDB > create timeseries root.ln.wf01.wt01.temperature FLOAT -IoTDB > create timeseries root.ln.wf02.wt02.hardware TEXT -IoTDB > create timeseries root.ln.wf02.wt02.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.status BOOLEAN -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT +```sql +create timeseries root.ln.wf01.wt01.status BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature FLOAT; +create timeseries root.ln.wf02.wt02.hardware TEXT; +create timeseries root.ln.wf02.wt02.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature FLOAT; ``` 创建时间序列时,系统会默认指定编码压缩方式,无需手动指定,若业务场景需要手动调整,可参考如下示例: -``` -IoTDB > create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY +```sql +create timeseries root.sgcc.wf03.wt01.temperature FLOAT encoding=PLAIN compressor=SNAPPY; ``` 需要注意的是,如果手动指定了编码方式,但与数据类型不对应时,系统会给出相应的错误提示,如下所示: -``` -IoTDB> create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF +```sql +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN, ENCODING=TS_2DIFF; error: encoding TS_2DIFF does not support BOOLEAN ``` @@ -389,8 +397,8 @@ error: encoding TS_2DIFF does not support BOOLEAN 创建一组对齐时间序列的SQL语句如下所示: -``` -IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) +```sql +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ``` 
一组对齐序列中的序列可以有不同的数据类型、编码方式以及压缩方式。 @@ -401,11 +409,11 @@ IoTDB> CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOA 我们可以使用`(DELETE | DROP) TimeSeries `语句来删除我们之前创建的时间序列。SQL 语句如下所示: -``` -IoTDB> delete timeseries root.ln.wf01.wt01.status -IoTDB> delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -IoTDB> delete timeseries root.ln.wf02.* -IoTDB> drop timeseries root.ln.wf02.* +```sql +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` ### 2.4 查看时间序列 @@ -426,14 +434,14 @@ IoTDB> drop timeseries root.ln.wf02.* 返回给定路径的下的所有时间序列信息。其中 `Path` 需要为一个时间序列路径或路径模式。例如,分别查看`root`路径和`root.ln`路径下的时间序列,SQL 语句如下所示: -``` -IoTDB> show timeseries root.** -IoTDB> show timeseries root.ln.** +```sql +show timeseries root.**; +show timeseries root.ln.**; ``` 执行结果分别为: -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -464,21 +472,21 @@ It costs 0.004s 只返回从指定下标开始的结果,最大返回条数被 LIMIT 限制,用于分页查询。例如: -``` -show timeseries root.ln.** limit 10 offset 10 +```sql +show timeseries root.ln.** limit 10 offset 10; ``` * SHOW TIMESERIES WHERE TIMESERIES contains 'containStr' 对查询结果集根据 timeseries 名称进行字符串模糊匹配过滤。例如: -``` -show timeseries root.ln.** where timeseries contains 'wf01.wt' +```sql +show timeseries root.ln.** where timeseries contains 'wf01.wt'; ``` 执行结果为: -``` +```shell 
+-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -493,13 +501,13 @@ It costs 0.016s 对查询结果集根据时间序列数据类型进行过滤。例如: -``` -show timeseries root.ln.** where dataType=FLOAT +```sql +show timeseries root.ln.** where dataType=FLOAT; ``` 执行结果为: -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -517,14 +525,14 @@ It costs 0.016s 对查询结果集根据标签进行过滤。例如: -``` -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` 执行结果分别为: -``` +```shell +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags|attributes|deadband|deadband parameters| +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ @@ 
-561,21 +569,21 @@ IoTDB 支持使用`COUNT TIMESERIES`来统计一条路径中的时间序 * 可以通过 `WHERE` 条件对标签点进行过滤,语法为: `COUNT TIMESERIES WHERE TAGS(key)='value'` 或 `COUNT TIMESERIES WHERE TAGS(key) contains 'value'`。 * 可以通过定义`LEVEL`来统计指定层级下的时间序列个数。这条语句可以用来统计每一个设备下的传感器数量,语法为:`COUNT TIMESERIES GROUP BY LEVEL=`。 -``` -IoTDB > COUNT TIMESERIES root.** -IoTDB > COUNT TIMESERIES root.ln.** -IoTDB > COUNT TIMESERIES root.ln.*.*.status -IoTDB > COUNT TIMESERIES root.ln.wf01.wt01.status -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -IoTDB > COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -IoTDB > COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 +```sql +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' ; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' ; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' ; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; ``` 例如有如下时间序列(可以使用`show timeseries`展示所有时间序列): -``` +```shell +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ | timeseries| alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +-------------------------------+--------+-------------+--------+--------+-----------+-------------------------------------------+--------------------------------------------------------+--------+-------------------+ @@ -597,16 +605,20 @@ It costs 0.004s 可以看到,`root`被定义为`LEVEL=0`。那么当你输入如下语句时: -``` -IoTDB > COUNT TIMESERIES root.** GROUP BY LEVEL=1 -IoTDB > COUNT TIMESERIES 
root.ln.** GROUP BY LEVEL=2 -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +```sql +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` 你将得到以下结果: +```sql +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` -IoTDB> COUNT TIMESERIES root.** GROUP BY LEVEL=1 +```shell +------------+-----------------+ | column|count(timeseries)| +------------+-----------------+ @@ -617,7 +629,6 @@ IoTDB> COUNT TIMESERIES root.** GROUP BY LEVEL=1 Total line number = 3 It costs 0.002s -IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 +------------+-----------------+ | column|count(timeseries)| +------------+-----------------+ @@ -627,7 +638,6 @@ IoTDB > COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 Total line number = 2 It costs 0.002s -IoTDB > COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +------------+-----------------+ | column|count(timeseries)| +------------+-----------------+ @@ -645,11 +655,15 @@ It costs 0.002s 需要注意的是, 在带有时间过滤的元数据查询中并不考虑视图的存在,只考虑TsFile中实际存储的时间序列。 一个使用样例如下: -``` -IoTDB> insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); -IoTDB> insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); -IoTDB> insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); -IoTDB> show timeseries; +```sql +insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); +insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); +insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); +show timeseries; +show timeseries where time >= 15000 and time < 16000; +count timeseries where time >= 15000 and time < 16000; +``` +```shell +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ | 
Timeseries|Alias|Database|DataType|Encoding|Compression|Tags|Attributes|Deadband|DeadbandParameters|ViewType| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ @@ -661,7 +675,6 @@ IoTDB> show timeseries; |root.sg.data2.s2| null| root.sg| FLOAT| GORILLA| LZ4|null| null| null| null| BASE| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ -IoTDB> show timeseries where time >= 15000 and time < 16000; +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ | Timeseries|Alias|Database|DataType|Encoding|Compression|Tags|Attributes|Deadband|DeadbandParameters|ViewType| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ @@ -671,7 +684,6 @@ IoTDB> show timeseries where time >= 15000 and time < 16000; |root.sg.data2.s2| null| root.sg| FLOAT| GORILLA| LZ4|null| null| null| null| BASE| +----------------+-----+--------+--------+--------+-----------+----+----------+--------+------------------+--------+ -IoTDB> count timeseries where time >= 15000 and time < 16000; +-----------------+ |count(timeseries)| +-----------------+ @@ -690,8 +702,8 @@ IoTDB> count timeseries where time >= 15000 and time < 16000; * 属性只能用时间序列路径来查询:时间序列路径 -> 属性 所用到的扩展的创建时间序列的 SQL 语句如下所示: -``` -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT, encoding=RLE, compression=SNAPPY tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +```sql +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT, encoding=RLE, compression=SNAPPY tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` 括号里的`temprature`是`s1`这个传感器的别名。 @@ -704,48 +716,48 @@ create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT, encoding=R * 标签点属性更新 创建时间序列后,我们也可以对其原有的标签点属性进行更新,主要有以下六种更新方式: * 重命名标签或属性 -``` -ALTER timeseries root.turbine.d1.s1 
RENAME tag1 TO newTag1 +```sql +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1; ``` * 重新设置标签或属性的值 -``` -ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +```sql +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1; ``` * 删除已经存在的标签或属性 -``` -ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +```sql +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2; ``` * 添加新的标签 -``` -ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +```sql +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4; ``` * 添加新的属性 -``` -ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +```sql +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4; ``` * 更新插入别名,标签和属性 > 如果该别名,标签或属性原来不存在,则插入,否则,用新值更新原来的旧值 -``` -ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4) +```sql +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4); ``` * 使用标签作为过滤条件查询时间序列,使用 TAGS(tagKey) 来标识作为过滤条件的标签 -``` +```sql SHOW TIMESERIES (<`PathPattern`>)? 
timeseriesWhereClause ``` 返回给定路径的下的所有满足条件的时间序列信息,SQL 语句如下所示: -``` -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +```sql +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` 执行结果分别为: -``` +```shell +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags|attributes|deadband|deadband parameters| +--------------------------+-----+-------------+--------+--------+-----------+------------+----------+--------+-------------------+ @@ -765,23 +777,22 @@ It costs 0.004s - 使用标签作为过滤条件统计时间序列数量 -``` -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +```sql +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? 
timeseriesWhereClause GROUP BY LEVEL=; ``` 返回给定路径的下的所有满足条件的时间序列的数量,SQL 语句如下所示: -``` -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +```sql +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` 执行结果分别为: -``` -IoTDB> count timeseries +```shell +-----------------+ |count(timeseries)| +-----------------+ @@ -789,7 +800,7 @@ IoTDB> count timeseries +-----------------+ Total line number = 1 It costs 0.019s -IoTDB> count timeseries root.** where TAGS(unit)='c' + +-----------------+ |count(timeseries)| +-----------------+ @@ -797,7 +808,7 @@ IoTDB> count timeseries root.** where TAGS(unit)='c' +-----------------+ Total line number = 1 It costs 0.020s -IoTDB> count timeseries root.** where TAGS(unit)='c' group by level = 2 + +--------------+-----------------+ | column|count(timeseries)| +--------------+-----------------+ @@ -813,14 +824,16 @@ It costs 0.011s 创建对齐时间序列 -``` -create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) +```sql +create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)); ``` 执行结果如下: +```sql +show timeseries; ``` -IoTDB> show timeseries +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -831,8 +844,10 @@ IoTDB> show timeseries 支持查询: +```sql +show timeseries where TAGS(tag1)='v1'; ``` -IoTDB> show timeseries 
where TAGS(tag1)='v1' +```shell +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ | timeseries|alias| database|dataType|encoding|compression| tags| attributes|deadband|deadband parameters| +--------------+-----+-------------+--------+--------+-----------+-------------------------+---------------------------+--------+-------------------+ @@ -897,8 +912,8 @@ IoTDB> show timeseries where TAGS(tag1)='v1' ### 3.5 查看路径的所有子路径 -``` -SHOW CHILD PATHS pathPattern +```sql +SHOW CHILD PATHS pathPattern; ``` 可以查看此路径模式所匹配的所有路径的下一层的所有路径和它对应的节点类型,即pathPattern.*所匹配的路径及其节点类型。 @@ -909,7 +924,7 @@ SHOW CHILD PATHS pathPattern * 查询 root.ln 的下一层:show child paths root.ln -``` +```shell +------------+----------+ | child paths|node types| +------------+----------+ @@ -922,7 +937,7 @@ It costs 0.002s * 查询形如 root.xx.xx.xx 的路径:show child paths root.\*.\* -``` +```shell +---------------+ | child paths| +---------------+ @@ -933,8 +948,8 @@ It costs 0.002s ### 3.6 查看路径的下一级节点 -``` -SHOW CHILD NODES pathPattern +```sql +SHOW CHILD NODES pathPattern; ``` 可以查看此路径模式所匹配的节点的下一层的所有节点。 @@ -943,7 +958,7 @@ SHOW CHILD NODES pathPattern * 查询 root 的下一层:show child nodes root -``` +```shell +------------+ | child nodes| +------------+ @@ -953,7 +968,7 @@ SHOW CHILD NODES pathPattern * 查询 root.ln 的下一层 :show child nodes root.ln -``` +```shell +------------+ | child nodes| +------------+ @@ -967,16 +982,16 @@ SHOW CHILD NODES pathPattern IoTDB 支持使用`COUNT NODES LEVEL=`来统计当前 Metadata 树下满足某路径模式的路径中指定层级的节点个数。这条语句可以用来统计带有特定采样点的设备数。例如: -``` -IoTDB > COUNT NODES root.** LEVEL=2 -IoTDB > COUNT NODES root.ln.** LEVEL=2 -IoTDB > COUNT NODES root.ln.wf01.* LEVEL=3 -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +```sql +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.* LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` 对于上面提到的例子和 Metadata Tree,你可以获得如下结果: -``` +```shell 
+------------+ |count(nodes)| +------------+ @@ -1024,15 +1039,15 @@ It costs 0.002s SQL 语句如下所示: -``` -IoTDB> show devices -IoTDB> show devices root.ln.** -IoTDB> show devices root.ln.** where device contains 't' +```sql +show devices; +show devices root.ln.**; +show devices root.ln.** where device contains 't'; ``` 你可以获得如下数据: -``` +```shell +-------------------+---------+---------+ | devices|isAligned| Template| +-------------------+---------+---------+ @@ -1074,14 +1089,14 @@ It costs 0.001s SQL 语句如下所示: -``` -IoTDB> show devices with database -IoTDB> show devices root.ln.** with database +```sql +show devices with database; +show devices root.ln.** with database; ``` 你可以获得如下数据: -``` +```shell +-------------------+-------------+---------+---------+ | devices| database|isAligned| Template| +-------------------+-------------+---------+---------+ @@ -1111,15 +1126,15 @@ It costs 0.001s SQL 语句如下所示: -``` -IoTDB> show devices -IoTDB> count devices -IoTDB> count devices root.ln.** +```sql +show devices; +count devices; +count devices root.ln.**; ``` 你可以获得如下数据: -``` +```shell +-------------------+---------+---------+ | devices|isAligned| Template| +-------------------+---------+---------+ @@ -1150,11 +1165,15 @@ It costs 0.004s ### 3.10 活跃设备查询 和活跃时间序列一样,我们可以在查看和统计设备的基础上添加时间过滤条件来查询在某段时间内存在数据的活跃设备。这里活跃的定义与活跃时间序列相同,使用样例如下: -``` -IoTDB> insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); -IoTDB> insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); -IoTDB> insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); -IoTDB> show devices; +```sql +insert into root.sg.data(timestamp, s1,s2) values(15000, 1, 2); +insert into root.sg.data2(timestamp, s1,s2) values(15002, 1, 2); +insert into root.sg.data3(timestamp, s1,s2) values(16000, 1, 2); +show devices; +show devices where time >= 15000 and time < 16000; +count devices where time >= 15000 and time < 16000; +``` +```shell +-------------------+---------+ | devices|isAligned| 
+-------------------+---------+ @@ -1163,7 +1182,6 @@ IoTDB> show devices; | root.sg.data3| false| +-------------------+---------+ -IoTDB> show devices where time >= 15000 and time < 16000; +-------------------+---------+ | devices|isAligned| +-------------------+---------+ @@ -1171,7 +1189,6 @@ IoTDB> show devices where time >= 15000 and time < 16000; | root.sg.data2| false| +-------------------+---------+ -IoTDB> count devices where time >= 15000 and time < 16000; +--------------+ |count(devices)| +--------------+ diff --git a/src/zh/UserGuide/latest/Basic-Concept/Query-Data_apache.md b/src/zh/UserGuide/latest/Basic-Concept/Query-Data_apache.md index dd6fa3a4f..aa5134863 100644 --- a/src/zh/UserGuide/latest/Basic-Concept/Query-Data_apache.md +++ b/src/zh/UserGuide/latest/Basic-Concept/Query-Data_apache.md @@ -117,7 +117,7 @@ IoTDB 支持即席(Ad_hoc)查询,即支持用户在使用系统时,自定义 SQL 语句为: ```sql -select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 +select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000; ``` 其含义为: @@ -126,7 +126,7 @@ select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.temperature| +-----------------------------+-----------------------------+ @@ -157,7 +157,7 @@ select status, temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05: 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -188,7 +188,7 @@ select status, temperature from root.ln.wf01.wt01 where (time > 2017-11-01T00:05 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | 
Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -220,7 +220,7 @@ select wf01.wt01.status, wf02.wt02.hardware from root.ln where (time > 2017-11-0 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+------------------------+--------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf02.wt02.hardware| +-----------------------------+------------------------+--------------------------+ @@ -248,7 +248,7 @@ select * from root.ln.** where time > 1 order by time desc limit 10; 语句执行的结果为: -``` +```shell +-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ | Time|root.ln.wf02.wt02.hardware|root.ln.wf02.wt02.status|root.ln.wf01.wt01.temperature|root.ln.wf01.wt01.status| +-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ @@ -428,7 +428,7 @@ from root.sg1; 运行结果: -``` +```shell +-----------------------------+----------+----------+----------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Time|root.sg1.a|root.sg1.b|((((root.sg1.a + 1) * 2) - 1) % 2) + 1.5|sin(root.sg1.a + sin(root.sg1.a + sin(root.sg1.b)))|(-root.sg1.a + root.sg1.b * ((sin(root.sg1.a + root.sg1.b) * sin(root.sg1.a + root.sg1.b)) + (cos(root.sg1.a + root.sg1.b) * cos(root.sg1.a + root.sg1.b)))) + 1| +-----------------------------+----------+----------+----------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -446,12 +446,12 @@ It 
costs 0.048s **示例 2:** ```sql -select (a + b) * 2 + sin(a) from root.sg +select (a + b) * 2 + sin(a) from root.sg; ``` 运行结果: -``` +```shell +-----------------------------+----------------------------------------------+ | Time|((root.sg.a + root.sg.b) * 2) + sin(root.sg.a)| +-----------------------------+----------------------------------------------+ @@ -472,12 +472,12 @@ It costs 0.011s **示例 3:** ```sql -select (a + *) / 2 from root.sg1 +select (a + *) / 2 from root.sg1; ``` 运行结果: -``` +```shell +-----------------------------+-----------------------------+-----------------------------+ | Time|(root.sg1.a + root.sg1.a) / 2|(root.sg1.a + root.sg1.b) / 2| +-----------------------------+-----------------------------+-----------------------------+ @@ -494,12 +494,12 @@ It costs 0.011s **示例 4:** ```sql -select (a + b) * 3 from root.sg, root.ln +select (a + b) * 3 from root.sg, root.ln; ``` 运行结果: -``` +```shell +-----------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ | Time|(root.sg.a + root.sg.b) * 3|(root.sg.a + root.ln.b) * 3|(root.ln.a + root.sg.b) * 3|(root.ln.a + root.ln.b) * 3| +-----------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ @@ -538,7 +538,7 @@ from root.ln.wf01.wt01; 运行结果: -``` +```shell +----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+--------------------------------------------------------------------+ |avg(root.ln.wf01.wt01.temperature)|sin(avg(root.ln.wf01.wt01.temperature))|avg(root.ln.wf01.wt01.temperature) + 1|-sum(root.ln.wf01.wt01.hardware)|avg(root.ln.wf01.wt01.temperature) + sum(root.ln.wf01.wt01.hardware)| 
+----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+--------------------------------------------------------------------+ @@ -553,12 +553,12 @@ It costs 0.009s ```sql select avg(*), (avg(*) + 1) * 3 / 2 -1 -from root.sg1 +from root.sg1; ``` 运行结果: -``` +```shell +---------------+---------------+-------------------------------------+-------------------------------------+ |avg(root.sg1.a)|avg(root.sg1.b)|(avg(root.sg1.a) + 1) * 3 / 2 - 1 |(avg(root.sg1.b) + 1) * 3 / 2 - 1 | +---------------+---------------+-------------------------------------+-------------------------------------+ @@ -582,7 +582,7 @@ GROUP BY([10, 90), 10ms); 运行结果: -``` +```shell +-----------------------------+----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+----------+ | Time|avg(root.ln.wf01.wt01.temperature)|sin(avg(root.ln.wf01.wt01.temperature))|avg(root.ln.wf01.wt01.temperature) + 1|-sum(root.ln.wf01.wt01.hardware)|custom_sum| +-----------------------------+----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+----------+ @@ -617,7 +617,7 @@ select last [COMMA ]* from < PrefixPath > [COMMA < PrefixPath >]* < - 结果集为四列的结构: - ``` + ```shell +----+----------+-----+--------+ |Time|timeseries|value|dataType| +----+----------+-----+--------+ @@ -627,8 +627,10 @@ select last [COMMA ]* from < PrefixPath > [COMMA < PrefixPath >]* < **示例 1:** 查询 root.ln.wf01.wt01.status 的最新数据点 +```sql +select last status from root.ln.wf01.wt01; ``` -IoTDB> select last status from root.ln.wf01.wt01 +```shell +-----------------------------+------------------------+-----+--------+ | Time| timeseries|value|dataType| +-----------------------------+------------------------+-----+--------+ @@ -640,8 +642,10 @@ It costs 0.000s **示例 2:** 查询 
root.ln.wf01.wt01 下 status,temperature 时间戳大于等于 2017-11-07T23:50:00 的最新数据点。 +```sql +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; ``` -IoTDB> select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +```shell +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -654,8 +658,10 @@ It costs 0.002s **示例 3:** 查询 root.ln.wf01.wt01 下所有序列的最新数据点,并按照序列名降序排列。 +```sql +select last * from root.ln.wf01.wt01 order by timeseries desc; ``` -IoTDB> select last * from root.ln.wf01.wt01 order by timeseries desc; +```shell +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -668,8 +674,10 @@ It costs 0.002s **示例 4:** 查询 root.ln.wf01.wt01 下所有序列的最新数据点,并按照dataType降序排列。 +```sql +select last * from root.ln.wf01.wt01 order by dataType desc; ``` -IoTDB> select last * from root.ln.wf01.wt01 order by dataType desc; +```shell +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -682,8 +690,10 @@ It costs 0.002s **注意:** 可以通过函数组合方式实现其他过滤条件查询最新点的需求,例如 +```sql +select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device; ``` -IoTDB> select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device +```shell +-----------------+---------------------+----------------+-----------------------+------------------+ | Device|max_time(temperature)|max_time(status)|last_value(temperature)|last_value(status)| 
+-----------------+---------------------+----------------+-----------------------+------------------+ @@ -793,8 +803,10 @@ It costs 0.021s **示例 1:** 查询 `root.sg.d1` 下 `value` 含有`'cc'`的数据。 +```sql +select * from root.sg.d1 where value like '%cc%'; ``` -IoTDB> select * from root.sg.d1 where value like '%cc%' +```shell +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -807,8 +819,10 @@ It costs 0.002s **示例 2:** 查询 `root.sg.d1` 下 `value` 中间为 `'b'`、前后为任意单个字符的数据。 +```sql +select * from root.sg.device where value like '_b_'; ``` -IoTDB> select * from root.sg.device where value like '_b_' +```shell +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -824,7 +838,7 @@ It costs 0.002s **常见的正则匹配举例:** -``` +```shell 长度为3-20的所有字符:^.{3,20}$ 大写英文字符:^[A-Z]+$ 数字和英文字符:^[A-Za-z0-9]+$ @@ -833,8 +847,10 @@ It costs 0.002s **示例 1:** 查询 root.sg.d1 下 value 值为26个英文字符组成的字符串。 +```sql +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +``` ```shell -IoTDB> select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -847,8 +863,10 @@ It costs 0.002s **示例 2:** 查询 root.sg.d1 下 value 值为26个小写英文字符组成的字符串且时间大于100的。 +```sql +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; +``` ```shell -IoTDB> select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -903,7 +921,7 @@ select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | 
Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -943,7 +961,7 @@ select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -968,7 +986,7 @@ select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2 SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -1008,7 +1026,7 @@ select count(status) from root.ln.wf01.wt01 where time > 2017-11-01T01:00:00 gro 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+ | Time|count(root.ln.wf01.wt01.status)| +-----------------------------+-------------------------------+ @@ -1052,7 +1070,7 @@ select count(status) from root.ln.wf01.wt01 group by([2017-10-31T00:00:00, 2019- 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+ | Time|count(root.ln.wf01.wt01.status)| +-----------------------------+-------------------------------+ @@ -1084,7 +1102,7 @@ select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017 SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+ | Time|count(root.ln.wf01.wt01.status)| +-----------------------------+-------------------------------+ @@ -1139,7 
+1157,7 @@ group by variation(controlExpression[,delta][,ignoreNull=true/false]) 5. 当前暂不支持与`GROUP BY LEVEL`搭配使用。 使用如下的原始数据,接下来会给出几个事件分段查询的使用样例 -``` +```shell +-----------------------------+-------+-------+-------+--------+-------+-------+ | Time| s1| s2| s3| s4| s5| s6| +-----------------------------+-------+-------+-------+--------+-------+-------+ @@ -1159,10 +1177,10 @@ group by variation(controlExpression[,delta][,ignoreNull=true/false]) ##### delta=0时的等值事件分段 使用如下sql语句 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6); ``` 得到如下的查询结果,这里忽略了s6为null的行 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1174,10 +1192,10 @@ select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation( ``` 当指定ignoreNull为false时,会将s6为null的数据也考虑进来 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false); ``` 得到如下的结果 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1193,10 +1211,10 @@ select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation( ##### delta!=0时的差值事件分段 使用如下sql语句 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4) +select __endTime, avg(s1), count(s2), sum(s3) from 
root.sg.d group by variation(s6, 4); ``` 得到如下的查询结果 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1208,10 +1226,10 @@ select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation( group by子句中的controlExpression同样支持列的表达式 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10); ``` 得到如下的查询结果 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1247,7 +1265,7 @@ keep表达式用来指定形成分组所需要连续满足`predict`条件的数 对于如下原始数据,下面会给出几个查询样例: -``` +```shell +-----------------------------+-------------------------+-------------------------------------+------------------------------------+ | Time|root.sg.beijing.car01.soc|root.sg.beijing.car01.charging_status|root.sg.beijing.car01.vehicle_status| +-----------------------------+-------------------------+-------------------------------------+------------------------------------+ @@ -1265,10 +1283,10 @@ keep表达式用来指定形成分组所需要连续满足`predict`条件的数 ``` 查询至少连续两行以上的charging_status=1的数据,sql语句如下: ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true); ``` 得到结果如下: -``` +```shell 
+-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ | Time|max_time(root.sg.beijing.car01.charging_status)|count(root.sg.beijing.car01.vehicle_status)|last_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ @@ -1278,10 +1296,10 @@ select max_time(charging_status),count(vehicle_status),last_value(soc) from root ``` 当设置`ignoreNull`为false时,遇到null值为将其视为一个不满足条件的行,会结束正在计算的分组。 ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false); ``` 得到如下结果,原先的分组被含null的行拆分: -``` +```shell +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ | Time|max_time(root.sg.beijing.car01.charging_status)|count(root.sg.beijing.car01.vehicle_status)|last_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ @@ -1310,7 +1328,7 @@ group by session(timeInterval) 3. 
当前暂不支持与`GROUP BY LEVEL`搭配使用。 对于下面的原始数据,给出几个查询样例。 -``` +```shell +-----------------------------+-----------------+-----------+--------+------+ | Time| Device|temperature|hardware|status| +-----------------------------+-----------------+-----------+--------+------+ @@ -1341,10 +1359,10 @@ group by session(timeInterval) ``` 可以按照不同的时间单位设定时间间隔,sql语句如下: ```sql -select __endTime,count(*) from root.** group by session(1d) +select __endTime,count(*) from root.** group by session(1d); ``` 得到如下结果: -``` +```shell +-----------------------------+-----------------------------+------------------------------------+---------------------------------+-------------------------------+ | Time| __endTime|count(root.ln.wf02.wt01.temperature)|count(root.ln.wf02.wt01.hardware)|count(root.ln.wf02.wt01.status)| +-----------------------------+-----------------------------+------------------------------------+---------------------------------+-------------------------------+ @@ -1354,10 +1372,10 @@ select __endTime,count(*) from root.** group by session(1d) ``` 也可以和`HAVING`、`ALIGN BY DEVICE`共同使用 ```sql -select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device +select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device; ``` 得到如下结果,其中排除了`sum(hardware)`为0的部分 -``` +```shell +-----------------------------+-----------------+-----------------------------+-------------+ | Time| Device| __endTime|sum(hardware)| +-----------------------------+-----------------+-----------------------------+-------------+ @@ -1392,7 +1410,7 @@ group by count(controlExpression, size[,ignoreNull=true/false]) 4. 
当一个分组内最终的点数不满足`size`的数量时,不会输出该分组的结果 对于下面的原始数据,给出几个查询样例。 -``` +```shell +-----------------------------+-----------+-----------------------+ | Time|root.sg.soc|root.sg.charging_status| +-----------------------------+-----------+-----------------------+ @@ -1410,10 +1428,10 @@ group by count(controlExpression, size[,ignoreNull=true/false]) ``` sql语句如下 ```sql -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5) +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5); ``` 得到如下结果,其中由于第二个1970-01-01T08:00:00.006+08:00到1970-01-01T08:00:00.010+08:00的窗口中包含四个点,不符合`size = 5`的条件,因此不被输出 -``` +```shell +-----------------------------+-----------------------------+--------------------------------------+ | Time| __endTime|first_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------+--------------------------------------+ @@ -1422,10 +1440,10 @@ select count(charging_stauts), first_value(soc) from root.sg group by count(char ``` 而当使用ignoreNull将null值也考虑进来时,可以得到两个点计数为5的窗口,sql如下 ```sql -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false) +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false); ``` 得到如下结果 -``` +```shell +-----------------------------+-----------------------------+--------------------------------------+ | Time| __endTime|first_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------+--------------------------------------+ @@ -1445,12 +1463,12 @@ select count(charging_stauts), first_value(soc) from root.sg group by count(char **示例1:** 不同 database 下均存在名为 status 的序列, 如 "root.ln.wf01.wt01.status", "root.ln.wf02.wt02.status", 以及 "root.sgcc.wf03.wt01.status", 如果需要统计不同 database 下 status 序列的数据点个数,使用以下查询: ```sql -select count(status) from root.** group by
level = 1; ``` 运行结果为: -``` +```shell +-------------------------+---------------------------+ |count(root.ln.*.*.status)|count(root.sgcc.*.*.status)| +-------------------------+---------------------------+ @@ -1463,12 +1481,12 @@ It costs 0.003s **示例2:** 统计不同设备下 status 序列的数据点个数,可以规定 level = 3, ```sql -select count(status) from root.** group by level = 3 +select count(status) from root.** group by level = 3; ``` 运行结果为: -``` +```shell +---------------------------+---------------------------+ |count(root.*.*.wt01.status)|count(root.*.*.wt02.status)| +---------------------------+---------------------------+ @@ -1483,12 +1501,12 @@ It costs 0.003s **示例3:** 统计不同 database 下的不同设备中 status 序列的数据点个数,可以使用以下查询: ```sql -select count(status) from root.** group by level = 1, 3 +select count(status) from root.** group by level = 1, 3; ``` 运行结果为: -``` +```shell +----------------------------+----------------------------+------------------------------+ |count(root.ln.*.wt01.status)|count(root.ln.*.wt02.status)|count(root.sgcc.*.wt01.status)| +----------------------------+----------------------------+------------------------------+ @@ -1501,12 +1519,12 @@ It costs 0.003s **示例4:** 查询所有序列下温度传感器 temperature 的最大值,可以使用下列查询语句: ```sql -select max_value(temperature) from root.** group by level = 0 +select max_value(temperature) from root.** group by level = 0; ``` 运行结果: -``` +```shell +---------------------------------+ |max_value(root.*.*.*.temperature)| +---------------------------------+ @@ -1519,12 +1537,12 @@ It costs 0.013s **示例5:** 上面的查询都是针对某一个传感器,特别地,**如果想要查询某一层级下所有传感器拥有的总数据点数,则需要显式规定测点为 `*`** ```sql -select count(*) from root.ln.** group by level = 2 +select count(*) from root.ln.** group by level = 2; ``` 运行结果: -``` +```shell +----------------------+----------------------+ |count(root.*.wf01.*.*)|count(root.*.wf02.*.*)| +----------------------+----------------------+ @@ -1548,7 +1566,7 @@ select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017 结果: -``` 
+```shell +-----------------------------+-------------------------+ | Time|COUNT(root.ln.*.*.status)| +-----------------------------+-------------------------+ @@ -1570,7 +1588,7 @@ It costs 0.006s select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d), level=1; ``` -``` +```shell +-----------------------------+-------------------------+ | Time|COUNT(root.ln.*.*.status)| +-----------------------------+-------------------------+ @@ -1661,7 +1679,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY TAGS(city); 该查询会将具有同一个 `city` 标签值的时间序列的所有满足查询条件的点做平均值计算,计算结果如下 -``` +```shell +--------+------------------+ | city| avg(temperature)| +--------+------------------+ @@ -1692,7 +1710,7 @@ SELECT avg(temperature) FROM root.factory1.** GROUP BY TAGS(city, workshop); 查询结果如下 -``` +```shell +--------+--------+------------------+ | city|workshop| avg(temperature)| +--------+--------+------------------+ @@ -1722,7 +1740,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 查询结果如下 -``` +```shell +-----------------------------+--------+--------+------------------+ | Time| city|workshop| avg(temperature)| +-----------------------------+--------+--------+------------------+ @@ -1762,16 +1780,16 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 下列使用方式是不正确的: ```sql - select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 - select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 + select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; + select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; ``` 2. 
对`GROUP BY LEVEL`结果进行过滤时,`SELECT`和`HAVING`中出现的PATH只能有一级。 下列使用方式是不正确的: ```sql - select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 - select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 + select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; + select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; ``` **SQL 示例:** @@ -1780,7 +1798,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 对于以下聚合结果进行过滤: - ``` + ```shell +-----------------------------+---------------------+---------------------+ | Time|count(root.test.*.s1)|count(root.test.*.s2)| +-----------------------------+---------------------+---------------------+ @@ -1798,7 +1816,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 执行结果如下: - ``` + ```shell +-----------------------------+---------------------+ | Time|count(root.test.*.s1)| +-----------------------------+---------------------+ @@ -1811,7 +1829,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS - **示例 2:** 对于以下聚合结果进行过滤: - ``` + ```shell +-----------------------------+-------------+---------+---------+ | Time| Device|count(s1)|count(s2)| +-----------------------------+-------------+---------+---------+ @@ -1834,7 +1852,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 执行结果如下: - ``` + ```shell +-----------------------------+-------------+---------+---------+ | Time| Device|count(s1)|count(s2)| +-----------------------------+-------------+---------+---------+ @@ -1899,7 +1917,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: 查询结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| 
+-----------------------------+-------------------------------+--------------------------+ @@ -1928,7 +1946,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `PREVIOUS` 填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| +-----------------------------+-------------------------------+--------------------------+ @@ -1950,9 +1968,9 @@ Total line number = 4 例如,原始数据如下所示: ```sql -select s1 from root.db.d1 -``` +select s1 from root.db.d1; ``` +```shell +-----------------------------+-------------+ | Time|root.db.d1.s1| +-----------------------------+-------------+ @@ -1969,9 +1987,9 @@ select s1 from root.db.d1 ```sql select avg(s1) from root.db.d1 - group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m) -``` + group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m); ``` +```shell +-----------------------------+------------------+ | Time|avg(root.db.d1.s1)| +-----------------------------+------------------+ @@ -2005,7 +2023,7 @@ select avg(s1) group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m) FILL(PREVIOUS); ``` -``` +```shell +-----------------------------+------------------+ | Time|avg(root.db.d1.s1)| +-----------------------------+------------------+ @@ -2039,7 +2057,7 @@ from root.db.d1 group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m) FILL(PREVIOUS, 2m); ``` -``` +```shell +-----------------------------+------------------+ | Time|avg(root.db.d1.s1)| +-----------------------------+------------------+ @@ -2082,7 +2100,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `LINEAR` 填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| 
+-----------------------------+-------------------------------+--------------------------+ @@ -2120,7 +2138,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `FLOAT` 类型的常量填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| +-----------------------------+-------------------------------+--------------------------+ @@ -2143,7 +2161,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `BOOLEAN` 类型的常量填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| +-----------------------------+-------------------------------+--------------------------+ @@ -2182,7 +2200,7 @@ Total line number = 4 SQL 语句: ```sql -select status, temperature from root.ln.wf01.wt01 limit 10 +select status, temperature from root.ln.wf01.wt01 limit 10; ``` 含义: @@ -2191,7 +2209,7 @@ select status, temperature from root.ln.wf01.wt01 limit 10 结果如下所示: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -2215,7 +2233,7 @@ It costs 0.000s SQL 语句: ```sql -select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 +select status, temperature from root.ln.wf01.wt01 limit 5 offset 3; ``` 含义: @@ -2224,7 +2242,7 @@ select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 结果如下所示: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -2243,7 +2261,7 @@ It costs 0.342s SQL 语句: 
```sql -select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:00.000 and time< 2024-07-12T00:12:00.000 limit 5 offset 3 +select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:00.000 and time< 2024-07-12T00:12:00.000 limit 5 offset 3; ``` 含义: @@ -2252,7 +2270,7 @@ select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:0 结果如下所示: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -2264,14 +2282,14 @@ select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:0 +-----------------------------+------------------------+-----------------------------+ Total line number = 5 It costs 0.070s -`` +``` - **示例 4:** `LIMIT` 子句与 `GROUP BY` 子句组合 SQL 语句: ```sql -select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3 +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3; ``` 含义: @@ -2280,7 +2298,7 @@ SQL 语句子句要求返回查询结果的第 3 至 6 行(第一行编号为 结果如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -2310,7 +2328,7 @@ It costs 0.016s SQL 语句: ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1; ``` 含义: @@ -2319,7 +2337,7 @@ select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and 
time < 结果如下所示: -``` +```shell +-----------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.temperature| +-----------------------------+-----------------------------+ @@ -2339,7 +2357,7 @@ It costs 0.000s SQL 语句: ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1; ``` 含义: @@ -2348,7 +2366,7 @@ select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 结果如下所示: -``` +```shell +-----------------------------+------------------------+ | Time|root.ln.wf01.wt01.status| +-----------------------------+------------------------+ @@ -2368,12 +2386,12 @@ It costs 0.003s SQL 语句: ```sql -select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1 +select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1; ``` 含义: -``` +```shell +-----------------------------+-----------------------------------+ | Time|max_value(root.ln.wf01.wt01.status)| +-----------------------------+-----------------------------------+ @@ -2394,7 +2412,7 @@ It costs 0.000s SQL 语句: ```sql -select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 +select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0; ``` 含义: @@ -2403,7 +2421,7 @@ select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 结果如下所示: -``` +```shell +-----------------------------+-----------------------------+------------------------+ | Time|root.ln.wf01.wt01.temperature|root.ln.wf01.wt01.status| +-----------------------------+-----------------------------+------------------------+ @@ -2431,7 +2449,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time desc; ``` 执行结果: -``` +```shell 
+-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ | Time|root.ln.wf02.wt02.hardware|root.ln.wf02.wt02.status|root.ln.wf01.wt01.temperature|root.ln.wf01.wt01.status| +-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ @@ -2463,7 +2481,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc, ``` 执行结果: -``` +```shell +-----------------------------+-----------------+--------+------+-----------+ | Time| Device|hardware|status|temperature| +-----------------------------+-----------------+--------+------+-----------+ @@ -2481,7 +2499,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc, select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,device desc align by device; ``` 执行结果: -``` +```shell +-----------------------------+-----------------+--------+------+-----------+ | Time| Device|hardware|status|temperature| +-----------------------------+-----------------+--------+------+-----------+ @@ -2498,7 +2516,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,dev select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` 结果如图所示,可以看出,`ORDER BY DEVICE ASC,TIME ASC`就是默认情况下的排序方式,由于`ASC`是默认排序顺序,此处可以省略。 -``` +```shell +-----------------------------+-----------------+--------+------+-----------+ | Time| Device|hardware|status|temperature| +-----------------------------+-----------------+--------+------+-----------+ @@ -2512,10 +2530,10 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` 同样,可以在聚合查询中使用`ALIGN BY DEVICE`和`ORDER BY`子句,对聚合后的结果进行排序,示例代码如下所示: ```sql -select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select count(*) from root.ln.** 
group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` 执行结果: -``` +```shell +-----------------------------+-----------------+---------------+-------------+------------------+ | Time| Device|count(hardware)|count(status)|count(temperature)| +-----------------------------+-----------------+---------------+-------------+------------------+ @@ -2534,7 +2552,7 @@ select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11 排序在通过`ASC`,`DESC`指定排序顺序的同时,可以通过`NULLS`语法来指定NULL值在排序中的优先级,`NULLS FIRST`默认NULL值在结果集的最上方,`NULLS LAST`则保证NULL值在结果集的最后。如果没有在子句中指定,则默认顺序为`ASC`,`NULLS LAST`。 对于如下的数据,将给出几个任意表达式的查询示例供参考: -``` +```shell +-----------------------------+-------------+-------+-------+--------+-------+ | Time| Device| base| score| bonus| total| +-----------------------------+-------------+-------+-------+--------+-------+ @@ -2555,11 +2573,11 @@ select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11 当需要根据基础分数score对结果进行排序时,可以直接使用 ```Sql -select score from root.** order by score desc align by device +select score from root.** order by score desc align by device; ``` 会得到如下结果 -``` +```shell +-----------------------------+---------+-----+ | Time| Device|score| +-----------------------------+---------+-----+ @@ -2578,15 +2596,15 @@ select score from root.** order by score desc align by device 当想要根据总分对结果进行排序,可以在order by子句中使用表达式进行计算 ```Sql -select score,total from root.one order by base+score+bonus desc +select score,total from root.one order by base+score+bonus desc; ``` 该sql等价于 ```Sql -select score,total from root.one order by total desc +select score,total from root.one order by total desc; ``` 得到如下结果 -``` +```shell +-----------------------------+--------------+--------------+ | Time|root.one.score|root.one.total| +-----------------------------+--------------+--------------+ @@ -2601,10 +2619,10 @@ select score,total from root.one order by total desc select base, score, 
bonus, total from root.** order by total desc NULLS Last, score desc NULLS Last, bonus desc NULLS Last, - time desc align by device + time desc align by device; ``` 得到如下结果 -``` +```shell +-----------------------------+----------+----+-----+-----+-----+ | Time| Device|base|score|bonus|total| +-----------------------------+----------+----+-----+-----+-----+ @@ -2625,10 +2643,10 @@ select base, score, bonus, total from root.** order by total desc NULLS Last, ``` 在order by中同样可以使用聚合查询表达式 ```Sql -select min_value(total) from root.** order by min_value(total) asc align by device +select min_value(total) from root.** order by min_value(total) asc align by device; ``` 得到如下结果 -``` +```shell +----------+----------------+ | Device|min_value(total)| +----------+----------------+ @@ -2641,11 +2659,11 @@ select min_value(total) from root.** order by min_value(total) asc align by devi ``` 当在查询中指定多列,未被排序的列会随着行和排序列一起改变顺序,当排序列相同时行的顺序和具体实现有关(没有固定顺序) ```Sql -select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device +select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device; ``` 得到结果如下 -· -``` + +```shell +----------+----------------+---------------+ | Device|min_value(total)|max_value(base)| +----------+----------------+---------------+ @@ -2659,10 +2677,10 @@ select min_value(total),max_value(base) from root.** order by max_value(total) d Order by device, time可以和order by expression共同使用 ```Sql -select score from root.** order by device asc, score desc, time asc align by device +select score from root.** order by device asc, score desc, time asc align by device; ``` 会得到如下结果 -``` +```shell +-----------------------------+---------+-----+ | Time| Device|score| +-----------------------------+---------+-----+ @@ -2705,7 +2723,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; 执行如下: -``` +```shell +-----------------------------+-----------------+-----------+------+--------+ | 
Time| Device|temperature|status|hardware| +-----------------------------+-----------------+-----------+------+--------+ @@ -2772,8 +2790,10 @@ intoItem 下面通过示例进一步说明: - **示例 1**(按时间对齐) +```sql +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +``` ```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +--------------+-------------------+--------+ | source column| target timeseries| written| +--------------+-------------------+--------+ @@ -2799,8 +2819,10 @@ It costs 0.725s > - `written` 表示预期写入的数据量。 - **示例 2**(按时间对齐) +```sql +select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +``` ```shell -IoTDB> select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +--------------------------------------+-------------------------+--------+ | source column| target timeseries| written| +--------------------------------------+-------------------------+--------+ @@ -2815,8 +2837,10 @@ It costs 0.375s 该语句将聚合查询的结果存储到指定序列中。 - **示例 3**(按设备对齐) +```sql +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +``` ```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +--------------+--------------+-------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+-------------------+--------+ @@ -2837,8 +2861,10 @@ It costs 0.625s > 按设备对齐查询时,`CLI` 展示的结果集多出一列 `source device` 列表示查询的设备。 - **示例 4**(按设备对齐) +```sql +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +``` ```shell -IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), 
root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +--------------+--------------+------------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+------------------------+--------+ @@ -2983,8 +3009,10 @@ select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from #### 实现 IoTDB 内部 ETL 对原始数据进行 ETL 处理后写入新序列。 +```sql +SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEVICE; +``` ```shell -IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEIVCE; +--------------+-------------------+---------------------------+--------+ | source device| source column| target timeseries| written| +--------------+-------------------+---------------------------+--------+ @@ -3001,8 +3029,10 @@ IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) #### 查询结果存储 将查询结果进行持久化存储,起到类似物化视图的作用。 +```sql +SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +``` ```shell -IOTDB > SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +--------------------------+-----------------------------+--------+ | source column| target timeseries| written| +--------------------------+-----------------------------+--------+ @@ -3020,8 +3050,10 @@ It costs 0.115s **注意:** 建议配合使用 `LIMIT & OFFSET` 子句或 `WHERE` 子句(时间过滤条件)对数据进行分批,防止单次操作的数据量过大。 +```sql +SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +``` ```shell -IOTDB > SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +--------------------------+----------------------+--------+ | source column| target timeseries| written|
+--------------------------+----------------------+--------+ diff --git a/src/zh/UserGuide/latest/Basic-Concept/Query-Data_timecho.md b/src/zh/UserGuide/latest/Basic-Concept/Query-Data_timecho.md index 8d8792e24..c90a119ec 100644 --- a/src/zh/UserGuide/latest/Basic-Concept/Query-Data_timecho.md +++ b/src/zh/UserGuide/latest/Basic-Concept/Query-Data_timecho.md @@ -126,7 +126,7 @@ select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.temperature| +-----------------------------+-----------------------------+ @@ -157,7 +157,7 @@ select status, temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05: 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -188,7 +188,7 @@ select status, temperature from root.ln.wf01.wt01 where (time > 2017-11-01T00:05 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -220,7 +220,7 @@ select wf01.wt01.status, wf02.wt02.hardware from root.ln where (time > 2017-11-0 该 SQL 语句的执行结果如下: -``` +```shell +-----------------------------+------------------------+--------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf02.wt02.hardware| +-----------------------------+------------------------+--------------------------+ @@ -248,7 +248,7 @@ select * from root.ln.** where time > 1 order by time desc limit 10; 语句执行的结果为: -``` +```shell 
+-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ | Time|root.ln.wf02.wt02.hardware|root.ln.wf02.wt02.status|root.ln.wf01.wt01.temperature|root.ln.wf01.wt01.status| +-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ @@ -428,7 +428,7 @@ from root.sg1; 运行结果: -``` +```shell +-----------------------------+----------+----------+----------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Time|root.sg1.a|root.sg1.b|((((root.sg1.a + 1) * 2) - 1) % 2) + 1.5|sin(root.sg1.a + sin(root.sg1.a + sin(root.sg1.b)))|(-root.sg1.a + root.sg1.b * ((sin(root.sg1.a + root.sg1.b) * sin(root.sg1.a + root.sg1.b)) + (cos(root.sg1.a + root.sg1.b) * cos(root.sg1.a + root.sg1.b)))) + 1| +-----------------------------+----------+----------+----------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -446,12 +446,12 @@ It costs 0.048s **示例 2:** ```sql -select (a + b) * 2 + sin(a) from root.sg +select (a + b) * 2 + sin(a) from root.sg; ``` 运行结果: -``` +```shell +-----------------------------+----------------------------------------------+ | Time|((root.sg.a + root.sg.b) * 2) + sin(root.sg.a)| +-----------------------------+----------------------------------------------+ @@ -472,12 +472,12 @@ It costs 0.011s **示例 3:** ```sql -select (a + *) / 2 from root.sg1 +select (a + *) / 2 from root.sg1; ``` 运行结果: -``` +```shell +-----------------------------+-----------------------------+-----------------------------+ | Time|(root.sg1.a + 
root.sg1.a) / 2|(root.sg1.a + root.sg1.b) / 2| +-----------------------------+-----------------------------+-----------------------------+ @@ -494,12 +494,12 @@ It costs 0.011s **示例 4:** ```sql -select (a + b) * 3 from root.sg, root.ln +select (a + b) * 3 from root.sg, root.ln; ``` 运行结果: -``` +```shell +-----------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ | Time|(root.sg.a + root.sg.b) * 3|(root.sg.a + root.ln.b) * 3|(root.ln.a + root.sg.b) * 3|(root.ln.a + root.ln.b) * 3| +-----------------------------+---------------------------+---------------------------+---------------------------+---------------------------+ @@ -538,7 +538,7 @@ from root.ln.wf01.wt01; 运行结果: -``` +```shell +----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+--------------------------------------------------------------------+ |avg(root.ln.wf01.wt01.temperature)|sin(avg(root.ln.wf01.wt01.temperature))|avg(root.ln.wf01.wt01.temperature) + 1|-sum(root.ln.wf01.wt01.hardware)|avg(root.ln.wf01.wt01.temperature) + sum(root.ln.wf01.wt01.hardware)| +----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+--------------------------------------------------------------------+ @@ -553,12 +553,12 @@ It costs 0.009s ```sql select avg(*), (avg(*) + 1) * 3 / 2 -1 -from root.sg1 +from root.sg1; ``` 运行结果: -``` +```shell +---------------+---------------+-------------------------------------+-------------------------------------+ |avg(root.sg1.a)|avg(root.sg1.b)|(avg(root.sg1.a) + 1) * 3 / 2 - 1 |(avg(root.sg1.b) + 1) * 3 / 2 - 1 | +---------------+---------------+-------------------------------------+-------------------------------------+ @@ -582,7 +582,7 @@ GROUP BY([10, 90), 10ms); 运行结果: -``` +```shell 
+-----------------------------+----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+----------+ | Time|avg(root.ln.wf01.wt01.temperature)|sin(avg(root.ln.wf01.wt01.temperature))|avg(root.ln.wf01.wt01.temperature) + 1|-sum(root.ln.wf01.wt01.hardware)|custom_sum| +-----------------------------+----------------------------------+---------------------------------------+--------------------------------------+--------------------------------+----------+ @@ -617,7 +617,7 @@ select last [COMMA ]* from < PrefixPath > [COMMA < PrefixPath >]* < - 结果集为四列的结构: - ``` + ```shell +----+----------+-----+--------+ |Time|timeseries|value|dataType| +----+----------+-----+--------+ @@ -627,8 +627,10 @@ select last [COMMA ]* from < PrefixPath > [COMMA < PrefixPath >]* < **示例 1:** 查询 root.ln.wf01.wt01.status 的最新数据点 +```sql + select last status from root.ln.wf01.wt01; ``` -IoTDB> select last status from root.ln.wf01.wt01 +```shell +-----------------------------+------------------------+-----+--------+ | Time| timeseries|value|dataType| +-----------------------------+------------------------+-----+--------+ @@ -640,8 +642,10 @@ It costs 0.000s **示例 2:** 查询 root.ln.wf01.wt01 下 status,temperature 时间戳大于等于 2017-11-07T23:50:00 的最新数据点。 +```sql + select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; ``` -IoTDB> select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +```shell +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -654,8 +658,10 @@ It costs 0.002s **示例 3:** 查询 root.ln.wf01.wt01 下所有序列的最新数据点,并按照序列名降序排列。 +```sql + select last * from root.ln.wf01.wt01 order by timeseries desc; ``` -IoTDB> select last * from root.ln.wf01.wt01 order by timeseries desc; +```shell 
+-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -668,8 +674,10 @@ It costs 0.002s **示例 4:** 查询 root.ln.wf01.wt01 下所有序列的最新数据点,并按照dataType降序排列。 +```sql + select last * from root.ln.wf01.wt01 order by dataType desc; ``` -IoTDB> select last * from root.ln.wf01.wt01 order by dataType desc; +```shell +-----------------------------+-----------------------------+---------+--------+ | Time| timeseries| value|dataType| +-----------------------------+-----------------------------+---------+--------+ @@ -682,8 +690,10 @@ It costs 0.002s **注意:** 可以通过函数组合方式实现其他过滤条件查询最新点的需求,例如 +```sql + select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device; ``` -IoTDB> select max_time(*), last_value(*) from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 and status = false align by device +```shell +-----------------+---------------------+----------------+-----------------------+------------------+ | Device|max_time(temperature)|max_time(status)|last_value(temperature)|last_value(status)| +-----------------+---------------------+----------------+-----------------------+------------------+ @@ -793,8 +803,10 @@ It costs 0.021s **示例 1:** 查询 `root.sg.d1` 下 `value` 含有`'cc'`的数据。 +```sql + select * from root.sg.d1 where value like '%cc%'; ``` -IoTDB> select * from root.sg.d1 where value like '%cc%' +```shell +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -807,8 +819,10 @@ It costs 0.002s **示例 2:** 查询 `root.sg.d1` 下 `value` 中间为 `'b'`、前后为任意单个字符的数据。 +```sql + select * from root.sg.device where value like '_b_'; ``` -IoTDB> select * from root.sg.device where value like '_b_' +```shell +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ 
@@ -824,7 +838,7 @@ It costs 0.002s **常见的正则匹配举例:** -``` +```shell 长度为3-20的所有字符:^.{3,20}$ 大写英文字符:^[A-Z]+$ 数字和英文字符:^[A-Za-z0-9]+$ @@ -833,8 +847,10 @@ It costs 0.002s **示例 1:** 查询 root.sg.d1 下 value 值为26个英文字符组成的字符串。 +```sql + select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +``` ```shell -IoTDB> select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -847,8 +863,10 @@ It costs 0.002s **示例 2:** 查询 root.sg.d1 下 value 值为26个小写英文字符组成的字符串且时间大于100的。 +```sql + select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; +``` ```shell -IoTDB> select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +-----------------------------+----------------+ | Time|root.sg.d1.value| +-----------------------------+----------------+ @@ -903,7 +921,7 @@ select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -943,7 +961,7 @@ select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -968,7 +986,7 @@ select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2 SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | 
Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -1008,7 +1026,7 @@ select count(status) from root.ln.wf01.wt01 where time > 2017-11-01T01:00:00 gro 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+ | Time|count(root.ln.wf01.wt01.status)| +-----------------------------+-------------------------------+ @@ -1052,7 +1070,7 @@ select count(status) from root.ln.wf01.wt01 group by([2017-10-31T00:00:00, 2019- 每个时间间隔窗口内都有数据,SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+ | Time|count(root.ln.wf01.wt01.status)| +-----------------------------+-------------------------------+ @@ -1084,7 +1102,7 @@ select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017 SQL 执行后的结果集如下所示: -``` +```shell +-----------------------------+-------------------------------+ | Time|count(root.ln.wf01.wt01.status)| +-----------------------------+-------------------------------+ @@ -1139,7 +1157,7 @@ group by variation(controlExpression[,delta][,ignoreNull=true/false]) 5. 
当前暂不支持与`GROUP BY LEVEL`搭配使用。 使用如下的原始数据,接下来会给出几个事件分段查询的使用样例 -``` +```shell +-----------------------------+-------+-------+-------+--------+-------+-------+ | Time| s1| s2| s3| s4| s5| s6| +-----------------------------+-------+-------+-------+--------+-------+-------+ @@ -1159,10 +1177,10 @@ group by variation(controlExpression[,delta][,ignoreNull=true/false]) ##### delta=0时的等值事件分段 使用如下sql语句 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6); ``` 得到如下的查询结果,这里忽略了s6为null的行 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1174,10 +1192,10 @@ select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation( ``` 当指定ignoreNull为false时,会将s6为null的数据也考虑进来 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false); ``` 得到如下的结果 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1193,10 +1211,10 @@ select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation( ##### delta!=0时的差值事件分段 使用如下sql语句 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4); ``` 得到如下的查询结果 -``` +```shell 
+-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1208,10 +1226,10 @@ select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation( group by子句中的controlExpression同样支持列的表达式 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6+s5, 10); ``` 得到如下的查询结果 -``` +```shell +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ | Time| __endTime|avg(root.sg.d.s1)|count(root.sg.d.s2)|sum(root.sg.d.s3)| +-----------------------------+-----------------------------+-----------------+-------------------+-----------------+ @@ -1247,7 +1265,7 @@ keep表达式用来指定形成分组所需要连续满足`predict`条件的数 对于如下原始数据,下面会给出几个查询样例: -``` +```shell +-----------------------------+-------------------------+-------------------------------------+------------------------------------+ | Time|root.sg.beijing.car01.soc|root.sg.beijing.car01.charging_status|root.sg.beijing.car01.vehicle_status| +-----------------------------+-------------------------+-------------------------------------+------------------------------------+ @@ -1265,10 +1283,10 @@ keep表达式用来指定形成分组所需要连续满足`predict`条件的数 ``` 查询至少连续两行以上的charging_status=1的数据,sql语句如下: ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true); ``` 得到结果如下: -``` +```shell 
+-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ | Time|max_time(root.sg.beijing.car01.charging_status)|count(root.sg.beijing.car01.vehicle_status)|last_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ @@ -1278,10 +1296,10 @@ select max_time(charging_status),count(vehicle_status),last_value(soc) from root ``` 当设置`ignoreNull`为false时,遇到null值为将其视为一个不满足条件的行,会结束正在计算的分组。 ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false); ``` 得到如下结果,原先的分组被含null的行拆分: -``` +```shell +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ | Time|max_time(root.sg.beijing.car01.charging_status)|count(root.sg.beijing.car01.vehicle_status)|last_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------------------------+-------------------------------------------+-------------------------------------+ @@ -1310,7 +1328,7 @@ group by session(timeInterval) 3. 
当前暂不支持与`GROUP BY LEVEL`搭配使用。 对于下面的原始数据,给出几个查询样例。 -``` +```shell +-----------------------------+-----------------+-----------+--------+------+ | Time| Device|temperature|hardware|status| +-----------------------------+-----------------+-----------+--------+------+ @@ -1341,10 +1359,10 @@ group by session(timeInterval) ``` 可以按照不同的时间单位设定时间间隔,sql语句如下: ```sql -select __endTime,count(*) from root.** group by session(1d) +select __endTime,count(*) from root.** group by session(1d); ``` 得到如下结果: -``` +```shell +-----------------------------+-----------------------------+------------------------------------+---------------------------------+-------------------------------+ | Time| __endTime|count(root.ln.wf02.wt01.temperature)|count(root.ln.wf02.wt01.hardware)|count(root.ln.wf02.wt01.status)| +-----------------------------+-----------------------------+------------------------------------+---------------------------------+-------------------------------+ @@ -1354,10 +1372,10 @@ select __endTime,count(*) from root.** group by session(1d) ``` 也可以和`HAVING`、`ALIGN BY DEVICE`共同使用 ```sql -select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device +select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device; ``` 得到如下结果,其中排除了`sum(hardware)`为0的部分 -``` +```shell +-----------------------------+-----------------+-----------------------------+-------------+ | Time| Device| __endTime|sum(hardware)| +-----------------------------+-----------------+-----------------------------+-------------+ @@ -1392,7 +1410,7 @@ group by count(controlExpression, size[,ignoreNull=true/false]) 4. 
当一个分组内最终的点数不满足`size`的数量时,不会输出该分组的结果 对于下面的原始数据,给出几个查询样例。 -``` +```shell +-----------------------------+-----------+-----------------------+ | Time|root.sg.soc|root.sg.charging_status| +-----------------------------+-----------+-----------------------+ @@ -1410,10 +1428,10 @@ group by count(controlExpression, size[,ignoreNull=true/false]) ``` sql语句如下 ```sql -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5) +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5); ``` 得到如下结果,其中由于第二个1970-01-01T08:00:00.006+08:00到1970-01-01T08:00:00.010+08:00的窗口中包含四个点,不符合`size = 5`的条件,因此不被输出 -``` +```shell +-----------------------------+-----------------------------+--------------------------------------+ | Time| __endTime|first_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------+--------------------------------------+ @@ -1422,10 +1440,10 @@ select count(charging_stauts), first_value(soc) from root.sg group by count(char ``` 而当使用ignoreNull将null值也考虑进来时,可以得到两个点计数为5的窗口,sql如下 ```sql -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false) +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false); ``` 得到如下结果 -``` +```shell +-----------------------------+-----------------------------+--------------------------------------+ | Time| __endTime|first_value(root.sg.beijing.car01.soc)| +-----------------------------+-----------------------------+--------------------------------------+ @@ -1445,12 +1463,12 @@ select count(charging_stauts), first_value(soc) from root.sg group by count(char **示例1:** 不同 database 下均存在名为 status 的序列, 如 "root.ln.wf01.wt01.status", "root.ln.wf02.wt02.status", 以及 "root.sgcc.wf03.wt01.status", 如果需要统计不同 database 下 status 序列的数据点个数,使用以下查询: ```sql -select count(status) from root.** group by level = 1 +select count(status) from root.** group by 
level = 1; ``` 运行结果为: -``` +```shell +-------------------------+---------------------------+ |count(root.ln.*.*.status)|count(root.sgcc.*.*.status)| +-------------------------+---------------------------+ @@ -1463,12 +1481,12 @@ It costs 0.003s **示例2:** 统计不同设备下 status 序列的数据点个数,可以规定 level = 3, ```sql -select count(status) from root.** group by level = 3 +select count(status) from root.** group by level = 3; ``` 运行结果为: -``` +```shell +---------------------------+---------------------------+ |count(root.*.*.wt01.status)|count(root.*.*.wt02.status)| +---------------------------+---------------------------+ @@ -1483,12 +1501,12 @@ It costs 0.003s **示例3:** 统计不同 database 下的不同设备中 status 序列的数据点个数,可以使用以下查询: ```sql -select count(status) from root.** group by level = 1, 3 +select count(status) from root.** group by level = 1, 3; ``` 运行结果为: -``` +```shell +----------------------------+----------------------------+------------------------------+ |count(root.ln.*.wt01.status)|count(root.ln.*.wt02.status)|count(root.sgcc.*.wt01.status)| +----------------------------+----------------------------+------------------------------+ @@ -1501,12 +1519,12 @@ It costs 0.003s **示例4:** 查询所有序列下温度传感器 temperature 的最大值,可以使用下列查询语句: ```sql -select max_value(temperature) from root.** group by level = 0 +select max_value(temperature) from root.** group by level = 0; ``` 运行结果: -``` +```shell +---------------------------------+ |max_value(root.*.*.*.temperature)| +---------------------------------+ @@ -1519,12 +1537,12 @@ It costs 0.013s **示例5:** 上面的查询都是针对某一个传感器,特别地,**如果想要查询某一层级下所有传感器拥有的总数据点数,则需要显式规定测点为 `*`** ```sql -select count(*) from root.ln.** group by level = 2 +select count(*) from root.ln.** group by level = 2; ``` 运行结果: -``` +```shell +----------------------+----------------------+ |count(root.*.wf01.*.*)|count(root.*.wf02.*.*)| +----------------------+----------------------+ @@ -1548,7 +1566,7 @@ select count(status) from root.ln.wf01.wt01 group by ((2017-11-01T00:00:00, 2017 结果: -``` 
+```shell +-----------------------------+-------------------------+ | Time|COUNT(root.ln.*.*.status)| +-----------------------------+-------------------------+ @@ -1570,7 +1588,7 @@ It costs 0.006s select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017-11-07 23:00:00), 3h, 1d), level=1; ``` -``` +```shell +-----------------------------+-------------------------+ | Time|COUNT(root.ln.*.*.status)| +-----------------------------+-------------------------+ @@ -1661,7 +1679,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY TAGS(city); 该查询会将具有同一个 `city` 标签值的时间序列的所有满足查询条件的点做平均值计算,计算结果如下 -``` +```shell +--------+------------------+ | city| avg(temperature)| +--------+------------------+ @@ -1692,7 +1710,7 @@ SELECT avg(temperature) FROM root.factory1.** GROUP BY TAGS(city, workshop); 查询结果如下 -``` +```shell +--------+--------+------------------+ | city|workshop| avg(temperature)| +--------+--------+------------------+ @@ -1722,7 +1740,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 查询结果如下 -``` +```shell +-----------------------------+--------+--------+------------------+ | Time| city|workshop| avg(temperature)| +-----------------------------+--------+--------+------------------+ @@ -1762,16 +1780,16 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 下列使用方式是不正确的: ```sql - select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 - select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 + select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; + select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; ``` 2. 
对`GROUP BY LEVEL`结果进行过滤时,`SELECT`和`HAVING`中出现的PATH只能有一级。 下列使用方式是不正确的: ```sql - select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 - select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 + select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; + select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; ``` **SQL 示例:** @@ -1780,7 +1798,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 对于以下聚合结果进行过滤: - ``` + ```shell +-----------------------------+---------------------+---------------------+ | Time|count(root.test.*.s1)|count(root.test.*.s2)| +-----------------------------+---------------------+---------------------+ @@ -1798,7 +1816,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 执行结果如下: - ``` + ```shell +-----------------------------+---------------------+ | Time|count(root.test.*.s1)| +-----------------------------+---------------------+ @@ -1811,7 +1829,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS - **示例 2:** 对于以下聚合结果进行过滤: - ``` + ```shell +-----------------------------+-------------+---------+---------+ | Time| Device|count(s1)|count(s2)| +-----------------------------+-------------+---------+---------+ @@ -1834,7 +1852,7 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS 执行结果如下: - ``` + ```shell +-----------------------------+-------------+---------+---------+ | Time| Device|count(s1)|count(s2)| +-----------------------------+-------------+---------+---------+ @@ -1899,7 +1917,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: 查询结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| 
+-----------------------------+-------------------------------+--------------------------+ @@ -1928,7 +1946,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `PREVIOUS` 填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| +-----------------------------+-------------------------------+--------------------------+ @@ -1950,9 +1968,9 @@ Total line number = 4 例如,原始数据如下所示: ```sql -select s1 from root.db.d1 -``` +select s1 from root.db.d1; ``` +```shell +-----------------------------+-------------+ | Time|root.db.d1.s1| +-----------------------------+-------------+ @@ -1969,9 +1987,9 @@ select s1 from root.db.d1 ```sql select avg(s1) from root.db.d1 - group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m) -``` + group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m); ``` +```shell +-----------------------------+------------------+ | Time|avg(root.db.d1.s1)| +-----------------------------+------------------+ @@ -2005,7 +2023,7 @@ select avg(s1) group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m) FILL(PREVIOUS); ``` -``` +```shell +-----------------------------+------------------+ | Time|avg(root.db.d1.s1)| +-----------------------------+------------------+ @@ -2039,7 +2057,7 @@ from root.db.d1 group by([2023-11-08T16:40:00.008+08:00, 2023-11-08T16:50:00.008+08:00), 1m) FILL(PREVIOUS, 2m); ``` -``` +```shell +-----------------------------+------------------+ | Time|avg(root.db.d1.s1)| +-----------------------------+------------------+ @@ -2082,7 +2100,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `LINEAR` 填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| 
+-----------------------------+-------------------------------+--------------------------+ @@ -2120,7 +2138,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `FLOAT` 类型的常量填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| +-----------------------------+-------------------------------+--------------------------+ @@ -2143,7 +2161,7 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: `BOOLEAN` 类型的常量填充后的结果如下: -``` +```shell +-----------------------------+-------------------------------+--------------------------+ | Time|root.sgcc.wf03.wt01.temperature|root.sgcc.wf03.wt01.status| +-----------------------------+-------------------------------+--------------------------+ @@ -2182,7 +2200,7 @@ Total line number = 4 SQL 语句: ```sql -select status, temperature from root.ln.wf01.wt01 limit 10 +select status, temperature from root.ln.wf01.wt01 limit 10; ``` 含义: @@ -2191,7 +2209,7 @@ select status, temperature from root.ln.wf01.wt01 limit 10 结果如下所示: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -2215,7 +2233,7 @@ It costs 0.000s SQL 语句: ```sql -select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 +select status, temperature from root.ln.wf01.wt01 limit 5 offset 3; ``` 含义: @@ -2224,7 +2242,7 @@ select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 结果如下所示: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -2243,7 +2261,7 @@ It costs 0.342s SQL 语句: 
```sql -select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:00.000 and time< 2024-07-12T00:12:00.000 limit 5 offset 3 +select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:00.000 and time< 2024-07-12T00:12:00.000 limit 5 offset 3; ``` 含义: @@ -2252,7 +2270,7 @@ select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:0 结果如下所示: -``` +```shell +-----------------------------+------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.status|root.ln.wf01.wt01.temperature| +-----------------------------+------------------------+-----------------------------+ @@ -2264,14 +2282,14 @@ select status,temperature from root.ln.wf01.wt01 where time > 2024-07-07T00:05:0 +-----------------------------+------------------------+-----------------------------+ Total line number = 5 It costs 0.070s -`` +``` - **示例 4:** `LIMIT` 子句与 `GROUP BY` 子句组合 SQL 语句: ```sql -select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3 +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3; ``` 含义: @@ -2280,7 +2298,7 @@ SQL 语句子句要求返回查询结果的第 3 至 6 行(第一行编号为 结果如下所示: -``` +```shell +-----------------------------+-------------------------------+----------------------------------------+ | Time|count(root.ln.wf01.wt01.status)|max_value(root.ln.wf01.wt01.temperature)| +-----------------------------+-------------------------------+----------------------------------------+ @@ -2310,7 +2328,7 @@ It costs 0.016s SQL 语句: ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1; ``` 含义: @@ -2319,7 +2337,7 @@ select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and 
time < 结果如下所示: -``` +```shell +-----------------------------+-----------------------------+ | Time|root.ln.wf01.wt01.temperature| +-----------------------------+-----------------------------+ @@ -2339,7 +2357,7 @@ It costs 0.000s SQL 语句: ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1; ``` 含义: @@ -2348,7 +2366,7 @@ select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 结果如下所示: -``` +```shell +-----------------------------+------------------------+ | Time|root.ln.wf01.wt01.status| +-----------------------------+------------------------+ @@ -2368,12 +2386,12 @@ It costs 0.003s SQL 语句: ```sql -select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1 +select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1; ``` 含义: -``` +```shell +-----------------------------+-----------------------------------+ | Time|max_value(root.ln.wf01.wt01.status)| +-----------------------------+-----------------------------------+ @@ -2394,7 +2412,7 @@ It costs 0.000s SQL 语句: ```sql -select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 +select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0; ``` 含义: @@ -2403,7 +2421,7 @@ select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 结果如下所示: -``` +```shell +-----------------------------+-----------------------------+------------------------+ | Time|root.ln.wf01.wt01.temperature|root.ln.wf01.wt01.status| +-----------------------------+-----------------------------+------------------------+ @@ -2431,7 +2449,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time desc; ``` 执行结果: -``` +```shell 
+-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ | Time|root.ln.wf02.wt02.hardware|root.ln.wf02.wt02.status|root.ln.wf01.wt01.temperature|root.ln.wf01.wt01.status| +-----------------------------+--------------------------+------------------------+-----------------------------+------------------------+ @@ -2463,7 +2481,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc, ``` 执行结果: -``` +```shell +-----------------------------+-----------------+--------+------+-----------+ | Time| Device|hardware|status|temperature| +-----------------------------+-----------------+--------+------+-----------+ @@ -2481,7 +2499,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by device desc, select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,device desc align by device; ``` 执行结果: -``` +```shell +-----------------------------+-----------------+--------+------+-----------+ | Time| Device|hardware|status|temperature| +-----------------------------+-----------------+--------+------+-----------+ @@ -2498,7 +2516,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 order by time asc,dev select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` 结果如图所示,可以看出,`ORDER BY DEVICE ASC,TIME ASC`就是默认情况下的排序方式,由于`ASC`是默认排序顺序,此处可以省略。 -``` +```shell +-----------------------------+-----------------+--------+------+-----------+ | Time| Device|hardware|status|temperature| +-----------------------------+-----------------+--------+------+-----------+ @@ -2512,10 +2530,10 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` 同样,可以在聚合查询中使用`ALIGN BY DEVICE`和`ORDER BY`子句,对聚合后的结果进行排序,示例代码如下所示: ```sql -select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select count(*) from root.ln.** 
group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` 执行结果: -``` +```shell +-----------------------------+-----------------+---------------+-------------+------------------+ | Time| Device|count(hardware)|count(status)|count(temperature)| +-----------------------------+-----------------+---------------+-------------+------------------+ @@ -2534,7 +2552,7 @@ select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11 排序在通过`ASC`,`DESC`指定排序顺序的同时,可以通过`NULLS`语法来指定NULL值在排序中的优先级,`NULLS FIRST`默认NULL值在结果集的最上方,`NULLS LAST`则保证NULL值在结果集的最后。如果没有在子句中指定,则默认顺序为`ASC`,`NULLS LAST`。 对于如下的数据,将给出几个任意表达式的查询示例供参考: -``` +```shell +-----------------------------+-------------+-------+-------+--------+-------+ | Time| Device| base| score| bonus| total| +-----------------------------+-------------+-------+-------+--------+-------+ @@ -2555,11 +2573,11 @@ select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11 当需要根据基础分数score对结果进行排序时,可以直接使用 ```Sql -select score from root.** order by score desc align by device +select score from root.** order by score desc align by device; ``` 会得到如下结果 -``` +```shell +-----------------------------+---------+-----+ | Time| Device|score| +-----------------------------+---------+-----+ @@ -2578,15 +2596,15 @@ select score from root.** order by score desc align by device 当想要根据总分对结果进行排序,可以在order by子句中使用表达式进行计算 ```Sql -select score,total from root.one order by base+score+bonus desc +select score,total from root.one order by base+score+bonus desc; ``` 该sql等价于 ```Sql -select score,total from root.one order by total desc +select score,total from root.one order by total desc; ``` 得到如下结果 -``` +```shell +-----------------------------+--------------+--------------+ | Time|root.one.score|root.one.total| +-----------------------------+--------------+--------------+ @@ -2601,10 +2619,10 @@ select score,total from root.one order by total desc select base, score, 
bonus, total from root.** order by total desc NULLS Last, score desc NULLS Last, bonus desc NULLS Last, - time desc align by device + time desc align by device; ``` 得到如下结果 -``` +```shell +-----------------------------+----------+----+-----+-----+-----+ | Time| Device|base|score|bonus|total| +-----------------------------+----------+----+-----+-----+-----+ @@ -2625,10 +2643,10 @@ select base, score, bonus, total from root.** order by total desc NULLS Last, ``` 在order by中同样可以使用聚合查询表达式 ```Sql -select min_value(total) from root.** order by min_value(total) asc align by device +select min_value(total) from root.** order by min_value(total) asc align by device; ``` 得到如下结果 -``` +```shell +----------+----------------+ | Device|min_value(total)| +----------+----------------+ @@ -2641,11 +2659,11 @@ select min_value(total) from root.** order by min_value(total) asc align by devi ``` 当在查询中指定多列,未被排序的列会随着行和排序列一起改变顺序,当排序列相同时行的顺序和具体实现有关(没有固定顺序) ```Sql -select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device +select min_value(total),max_value(base) from root.** order by max_value(total) desc align by device; ``` 得到结果如下 -· -``` + +```shell +----------+----------------+---------------+ | Device|min_value(total)|max_value(base)| +----------+----------------+---------------+ @@ -2659,10 +2677,10 @@ select min_value(total),max_value(base) from root.** order by max_value(total) d Order by device, time可以和order by expression共同使用 ```Sql -select score from root.** order by device asc, score desc, time asc align by device +select score from root.** order by device asc, score desc, time asc align by device; ``` 会得到如下结果 -``` +```shell +-----------------------------+---------+-----+ | Time| Device|score| +-----------------------------+---------+-----+ @@ -2705,7 +2723,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; 执行如下: -``` +```shell +-----------------------------+-----------------+-----------+------+--------+ | 
Time| Device|temperature|status|hardware| +-----------------------------+-----------------+-----------+------+--------+ @@ -2772,8 +2790,10 @@ intoItem 下面通过示例进一步说明: - **示例 1**(按时间对齐) +```sql + select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +``` ```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +--------------+-------------------+--------+ | source column| target timeseries| written| +--------------+-------------------+--------+ @@ -2799,8 +2819,10 @@ It costs 0.725s > - `written` 表示预期写入的数据量。 - **示例 2**(按时间对齐) +```sql + select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +``` ```shell -IoTDB> select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +--------------------------------------+-------------------------+--------+ | source column| target timeseries| written| +--------------------------------------+-------------------------+--------+ @@ -2815,8 +2837,10 @@ It costs 0.375s 该语句将聚合查询的结果存储到指定序列中。 - **示例 3**(按设备对齐) +```sql + select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +``` ```shell -IoTDB> select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +--------------+--------------+-------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+-------------------+--------+ @@ -2837,8 +2861,10 @@ It costs 0.625s > 按设备对齐查询时,`CLI` 展示的结果集多出一列 `source device` 列表示查询的设备。 - **示例 4**(按设备对齐) +```sql + select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +``` ```shell -IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), 
root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +--------------+--------------+------------------------+--------+ | source device| source column| target timeseries| written| +--------------+--------------+------------------------+--------+ @@ -2983,8 +3009,10 @@ select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from #### 实现 IoTDB 内部 ETL 对原始数据进行 ETL 处理后写入新序列。 +```sql +SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEVICE; +``` ```shell -IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) FROM root.sg.* ALIGN BY DEIVCE; +--------------+-------------------+---------------------------+--------+ | source device| source column| target timeseries| written| +--------------+-------------------+---------------------------+--------+ @@ -3001,8 +3029,10 @@ IOTDB > SELECT preprocess_udf(s1, s2) INTO ::(preprocessed_s1, preprocessed_s2) #### 查询结果存储 将查询结果进行持久化存储,起到类似物化视图的作用。 +```sql +SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +``` ```shell -IOTDB > SELECT count(s1), last_value(s1) INTO root.sg.agg_${2}(count_s1, last_value_s1) FROM root.sg1.d1 GROUP BY ([0, 10000), 10ms); +--------------------------+-----------------------------+--------+ | source column| target timeseries| written| +--------------------------+-----------------------------+--------+ @@ -3020,8 +3050,10 @@ It costs 0.115s **注意:** 建议配合使用 `LIMIT & OFFSET` 子句或 `WHERE` 子句(时间过滤条件)对数据进行分批,防止单次操作的数据量过大。 +```sql +SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +``` ```shell -IOTDB > SELECT s1, s2 INTO ALIGNED root.sg1.aligned_d(s1, s2) FROM root.sg1.non_aligned_d WHERE time >= 0 and time < 10000; +--------------------------+----------------------+--------+ | source column| target timeseries| written|
+--------------------------+----------------------+--------+ diff --git a/src/zh/UserGuide/latest/SQL-Manual/SQL-Manual_apache.md b/src/zh/UserGuide/latest/SQL-Manual/SQL-Manual_apache.md index 98a2bde25..cb4b7f3ee 100644 --- a/src/zh/UserGuide/latest/SQL-Manual/SQL-Manual_apache.md +++ b/src/zh/UserGuide/latest/SQL-Manual/SQL-Manual_apache.md @@ -7,32 +7,32 @@ #### 创建数据库 ```sql -CREATE DATABASE root.ln +CREATE DATABASE root.ln; ``` #### 查看数据库 ```sql -show databases -show databases root.* -show databases root.** +show databases; +show databases root.*; +show databases root.**; ``` #### 删除数据库 ```sql -DELETE DATABASE root.ln -DELETE DATABASE root.sgcc -DELETE DATABASE root.** +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +DELETE DATABASE root.**; ``` #### 统计数据库数量 ```sql -count databases -count databases root.* -count databases root.sgcc.* -count databases root.sgcc +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` ### 1.2 时间序列管理 @@ -40,161 +40,161 @@ count databases root.sgcc #### 创建时间序列 ```sql -create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` - 简化版 ```sql -create timeseries root.ln.wf01.wt01.status BOOLEAN -create timeseries 
root.ln.wf01.wt01.temperature FLOAT -create timeseries root.ln.wf02.wt02.hardware TEXT -create timeseries root.ln.wf02.wt02.status BOOLEAN -create timeseries root.sgcc.wf03.wt01.status BOOLEAN -create timeseries root.sgcc.wf03.wt01.temperature FLOAT +create timeseries root.ln.wf01.wt01.status BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature FLOAT; +create timeseries root.ln.wf02.wt02.hardware TEXT; +create timeseries root.ln.wf02.wt02.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature FLOAT; ``` - 错误提示 ```sql -create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN -> error: encoding TS_2DIFF does not support BOOLEAN +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN; +> error: encoding TS_2DIFF does not support BOOLEAN; ``` #### 创建对齐时间序列 ```sql -CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ``` #### 删除时间序列 ```sql -delete timeseries root.ln.wf01.wt01.status -delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -delete timeseries root.ln.wf02.* -drop timeseries root.ln.wf02.* +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` #### 查看时间序列 ```sql -SHOW TIMESERIES -SHOW TIMESERIES -SHOW TIMESERIES root.** -SHOW TIMESERIES root.ln.** -SHOW TIMESERIES root.ln.** limit 10 offset 10 -SHOW TIMESERIES root.ln.** where timeseries contains 'wf01.wt' -SHOW TIMESERIES root.ln.** where dataType=FLOAT +SHOW TIMESERIES; +SHOW TIMESERIES ; +SHOW TIMESERIES root.**; +SHOW TIMESERIES root.ln.**; +SHOW TIMESERIES root.ln.** limit 10 offset 10; +SHOW TIMESERIES root.ln.** where timeseries contains 'wf01.wt'; +SHOW TIMESERIES root.ln.** where dataType=FLOAT; SHOW TIMESERIES root.ln.** where time>=2017-01-01T00:00:00 
and time<=2017-11-01T16:26:00; -SHOW LATEST TIMESERIES +SHOW LATEST TIMESERIES; ``` #### 统计时间序列数量 ```sql -COUNT TIMESERIES root.** -COUNT TIMESERIES root.ln.** -COUNT TIMESERIES root.ln.*.*.status -COUNT TIMESERIES root.ln.wf01.wt01.status -COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc'; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c'; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c'; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; COUNT TIMESERIES root.** WHERE time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -COUNT TIMESERIES root.** GROUP BY LEVEL=1 -COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` #### 标签点管理 ```sql -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` - 重命名标签或属性 ```sql -ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1; ``` - 重新设置标签或属性的值 ```sql -ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1; ``` - 删除已经存在的标签或属性 ```sql -ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +ALTER 
timeseries root.turbine.d1.s1 DROP tag1, tag2; ``` - 添加新的标签 ```sql -ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4; ``` - 添加新的属性 ```sql -ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4; ``` - 更新插入别名,标签和属性 ```sql -ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4) +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4); ``` - 使用标签作为过滤条件查询时间序列 ```sql -SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause +SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; ``` 返回给定路径的下的所有满足条件的时间序列信息: ```sql -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` - 使用标签作为过滤条件统计时间序列数量 ```sql -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? 
timeseriesWhereClause GROUP BY LEVEL=; ``` 返回给定路径的下的所有满足条件的时间序列的数量: ```sql -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` 创建对齐时间序列: ```sql -create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) +create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)); ``` 支持查询: ```sql -show timeseries where TAGS(tag1)='v1' +show timeseries where TAGS(tag1)='v1'; ``` ### 1.3 时间序列路径管理 @@ -202,54 +202,48 @@ show timeseries where TAGS(tag1)='v1' #### 查看路径的所有子路径 ```sql -SHOW CHILD PATHS pathPattern -- 查询 root.ln 的下一层:show child paths root.ln -- 查询形如 root.xx.xx.xx 的路径:show child paths root.*.* +SHOW CHILD PATHS pathPattern; +- 查询 root.ln 的下一层; +show child paths root.ln; +- 查询形如 root.xx.xx.xx 的路径; +show child paths root.*.*; ``` #### 查看路径的所有子节点 ```sql -SHOW CHILD NODES pathPattern - -- 查询 root 的下一层:show child nodes root -- 查询 root.ln 的下一层 :show child nodes root.ln +SHOW CHILD NODES pathPattern; +- 查询 root 的下一层; +show child nodes root; +- 查询 root.ln 的下一层; +show child nodes root.ln; ``` #### 查看设备 ```sql -IoTDB> show devices - -IoTDB> show devices root.ln.** - -IoTDB> show devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; +show devices; +show devices root.ln.**; +show devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; ``` ##### 查看设备及其 database 信息 ```sql -IoTDB> show devices with database - -IoTDB> show devices root.ln.** with database +show devices with database; +show devices root.ln.** with database; ``` #### 统计节点数 ```sql -IoTDB > COUNT NODES root.** LEVEL=2 - -IoTDB > COUNT NODES root.ln.** LEVEL=2 - -IoTDB > 
COUNT NODES root.ln.wf01.* LEVEL=3 - -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.* LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` #### 统计设备数量 ```sql - -IoTDB> count devices - -IoTDB> count devices root.ln.** - -IoTDB> count devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; +count devices; +count devices root.ln.**; +count devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; ``` ### 1.4 设备模板管理 @@ -262,136 +256,106 @@ IoTDB> count devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26: #### 创建设备模板 ```sql -CREATE DEVICE TEMPLATE ALIGNED? '(' [',' ]+ ')' +CREATE DEVICE TEMPLATE ALIGNED? '(' [',' ]+ ')'; ``` 创建包含两个非对齐序列的设备模板 ```sql -IoTDB> create device template t1 (temperature FLOAT, status BOOLEAN) +create device template t1 (temperature FLOAT, status BOOLEAN); ``` 创建包含一组对齐序列的设备模板 ```sql -IoTDB> create device template t2 aligned (lat FLOAT, lon FLOAT) +create device template t2 aligned (lat FLOAT, lon FLOAT); ``` #### 挂载设备模板 ```sql -IoTDB> set DEVICE TEMPLATE t1 to root.sg1 +set DEVICE TEMPLATE t1 to root.sg1; ``` #### 激活设备模板 ```sql -IoTDB> create timeseries using DEVICE TEMPLATE on root.sg1.d1 - -IoTDB> set DEVICE TEMPLATE t1 to root.sg1.d1 - -IoTDB> set DEVICE TEMPLATE t2 to root.sg1.d2 - -IoTDB> create timeseries using device template on root.sg1.d1 - -IoTDB> create timeseries using device template on root.sg1.d2 +create timeseries using DEVICE TEMPLATE on root.sg1.d1; +set DEVICE TEMPLATE t1 to root.sg1.d1; +set DEVICE TEMPLATE t2 to root.sg1.d2; +create timeseries using device template on root.sg1.d1; +create timeseries using device template on root.sg1.d2; ``` #### 查看设备模板 ```sql -IoTDB> show device templates +show device templates; ``` - 查看某个设备模板下的物理量 ```sql -IoTDB> show nodes in device template t1 +show nodes in device template t1; ``` - 查看挂载了某个设备模板的路径 ```sql -IoTDB> show paths set device template t1 +show 
paths set device template t1; ``` - 查看使用了某个设备模板的路径(即模板在该路径上已激活,序列已创建) ```sql -IoTDB> show paths using device template t1 +show paths using device template t1; ``` #### 解除设备模板 ```sql -IoTDB> delete timeseries of device template t1 from root.sg1.d1 -``` -```sql -IoTDB> deactivate device template t1 from root.sg1.d1 +delete timeseries of device template t1 from root.sg1.d1; +deactivate device template t1 from root.sg1.d1; ``` 批量处理 ```sql -IoTDB> delete timeseries of device template t1 from root.sg1.*, root.sg2.* -``` -```sql -IoTDB> deactivate device template t1 from root.sg1.*, root.sg2.* +delete timeseries of device template t1 from root.sg1.*, root.sg2.*; +deactivate device template t1 from root.sg1.*, root.sg2.*; ``` #### 卸载设备模板 ```sql -IoTDB> unset device template t1 from root.sg1.d1 +unset device template t1 from root.sg1.d1; ``` #### 删除设备模板 ```sql -IoTDB> drop device template t1 +drop device template t1; ``` ### 1.5 数据存活时间管理 #### 设置 TTL ```sql -IoTDB> set ttl to root.ln 3600000 -``` -```sql -IoTDB> set ttl to root.sgcc.** 3600000 -``` -```sql -IoTDB> set ttl to root.** 3600000 +set ttl to root.ln 3600000; +set ttl to root.sgcc.** 3600000; +set ttl to root.** 3600000; ``` #### 取消 TTL ```sql -IoTDB> unset ttl from root.ln -``` -```sql -IoTDB> unset ttl from root.sgcc.** -``` -```sql -IoTDB> unset ttl from root.** +unset ttl from root.ln; +unset ttl from root.sgcc.**; +unset ttl from root.**; ``` #### 显示 TTL ```sql -IoTDB> SHOW ALL TTL -``` -```sql -IoTDB> SHOW TTL ON pathPattern -``` -```sql -IoTDB> show DEVICES +SHOW ALL TTL; +SHOW TTL ON pathPattern; +show DEVICES; ``` ## 2. 
写入数据 ### 2.1 写入单列数据 ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp,status) values(1,true) -``` -```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1'),(2, 'v1') +insert into root.ln.wf02.wt02(timestamp,status) values(1,true); +insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1'),(2, 'v1'); ``` ### 2.2 写入多列数据 ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) values (2, false, 'v2') -``` -```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4') +insert into root.ln.wf02.wt02(timestamp, status, hardware) values (2, false, 'v2'); +insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4'); ``` ### 2.3 使用服务器时间戳 ```sql -IoTDB > insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2') +insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2'); ``` ### 2.4 写入对齐时间序列数据 ```sql -IoTDB > create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE) -``` -```sql -IoTDB > insert into root.sg1.d1(timestamp, s1, s2) aligned values(1, 1, 1) -``` -```sql -IoTDB > insert into root.sg1.d1(timestamp, s1, s2) aligned values(2, 2, 2), (3, 3, 3) -``` -```sql -IoTDB > select * from root.sg1.d1 +create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE); +insert into root.sg1.d1(timestamp, s1, s2) aligned values(1, 1, 1); +insert into root.sg1.d1(timestamp, s1, s2) aligned values(2, 2, 2), (3, 3, 3); +select * from root.sg1.d1; ``` ### 2.5 加载 TsFile 文件数据 @@ -417,43 +381,24 @@ load '' [sglevel=int][onSuccess=delete/none] ### 3.1 删除单列数据 ```sql delete from root.ln.wf02.wt02.status where time<=2017-11-01T16:26:00; -``` -```sql delete from root.ln.wf02.wt02.status where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -``` -```sql -delete from root.ln.wf02.wt02.status where time < 10 -``` -```sql -delete from root.ln.wf02.wt02.status where time <= 10 -``` -```sql -delete from root.ln.wf02.wt02.status 
where time < 20 and time > 10 -``` -```sql -delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10 -``` -```sql -delete from root.ln.wf02.wt02.status where time > 20 -``` -```sql -delete from root.ln.wf02.wt02.status where time >= 20 -``` -```sql -delete from root.ln.wf02.wt02.status where time = 20 +delete from root.ln.wf02.wt02.status where time < 10; +delete from root.ln.wf02.wt02.status where time <= 10; +delete from root.ln.wf02.wt02.status where time < 20 and time > 10; +delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10; +delete from root.ln.wf02.wt02.status where time > 20; +delete from root.ln.wf02.wt02.status where time >= 20; +delete from root.ln.wf02.wt02.status where time = 20; ``` 出错: ```sql -delete from root.ln.wf02.wt02.status where time > 4 or time < 0 - -Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic - -expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' +delete from root.ln.wf02.wt02.status where time > 4 or time < 0; +Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' ``` 删除时间序列中的所有数据: ```sql -delete from root.ln.wf02.wt02.status +delete from root.ln.wf02.wt02.status; ``` ### 3.2 删除多列数据 ```sql @@ -461,8 +406,7 @@ delete from root.ln.wf02.wt02.* where time <= 2017-11-01T16:26:00; ``` 声明式的编程方式: ```sql -IoTDB> delete from root.ln.wf03.wt02.status where time < now() - +delete from root.ln.wf03.wt02.status where time < now(); Msg: The statement is executed successfully. ``` ## 4. 数据查询 @@ -471,7 +415,7 @@ Msg: The statement is executed successfully. 
#### 时间过滤查询 ```sql -select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 +select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000; ``` #### 根据一个时间区间选择多列数据 ```sql @@ -502,9 +446,7 @@ select s1 as temperature, s2 as speed from root.ln.wf01.wt01; 不支持: ```sql select s1, count(s1) from root.sg.d1; - select sin(s1), count(s1) from root.sg.d1; - select s1, count(s1) from root.sg.d1 group by ([10,100),10ms); ``` ##### 时间序列查询嵌套表达式 @@ -512,67 +454,50 @@ select s1, count(s1) from root.sg.d1 group by ([10,100),10ms); 示例 1: ```sql select a, - -​ b, - -​ ((a + 1) * 2 - 1) % 2 + 1.5, - -​ sin(a + sin(a + sin(b))), - -​ -(a + b) * (sin(a + b) * sin(a + b) + cos(a + b) * cos(a + b)) + 1 - + b, + ((a + 1) * 2 - 1) % 2 + 1.5, + sin(a + sin(a + sin(b))), + -(a + b) * (sin(a + b) * sin(a + b) + cos(a + b) * cos(a + b)) + 1 from root.sg1; ``` 示例 2: ```sql -select (a + b) * 2 + sin(a) from root.sg +select (a + b) * 2 + sin(a) from root.sg; +``` 示例 3: - -select (a + *) / 2 from root.sg1 - +```sql +select (a + *) / 2 from root.sg1; +``` 示例 4: - -select (a + b) * 3 from root.sg, root.ln +```sql +select (a + b) * 3 from root.sg, root.ln; ``` ##### 聚合查询嵌套表达式 示例 1: ```sql select avg(temperature), - -​ sin(avg(temperature)), - -​ avg(temperature) + 1, - -​ -sum(hardware), - -​ avg(temperature) + sum(hardware) - + sin(avg(temperature)), + avg(temperature) + 1, + -sum(hardware), + avg(temperature) + sum(hardware) from root.ln.wf01.wt01; ``` 示例 2: ```sql select avg(*), - -​ (avg(*) + 1) * 3 / 2 -1 - -from root.sg1 + (avg(*) + 1) * 3 / 2 -1 +from root.sg1; ``` 示例 3: ```sql select avg(temperature), - -​ sin(avg(temperature)), - -​ avg(temperature) + 1, - -​ -sum(hardware), - -​ avg(temperature) + sum(hardware) as custom_sum - + sin(avg(temperature)), + avg(temperature) + 1, + -sum(hardware), + avg(temperature) + sum(hardware) as custom_sum from root.ln.wf01.wt01 - GROUP BY([10, 90), 10ms); ``` #### 最新点查询 @@ -585,15 +510,15 @@ select last [COMMA ]* 
from < PrefixPath > [COMMA < PrefixPath >]* < 查询 root.ln.wf01.wt01.status 的最新数据点 ```sql -IoTDB> select last status from root.ln.wf01.wt01 +select last status from root.ln.wf01.wt01; ``` 查询 root.ln.wf01.wt01 下 status,temperature 时间戳大于等于 2017-11-07T23:50:00 的最新数据点 ```sql -IoTDB> select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; ``` 查询 root.ln.wf01.wt01 下所有序列的最新数据点,并按照序列名降序排列 ```sql -IoTDB> select last * from root.ln.wf01.wt01 order by timeseries desc; +select last * from root.ln.wf01.wt01 order by timeseries desc; ``` ### 4.3 查询过滤条件 @@ -648,20 +573,20 @@ select code from root.sg1.d1 where temperature is not null; 查询 `root.sg.d1` 下 `value` 含有`'cc'`的数据 ```sql -IoTDB> select * from root.sg.d1 where value like '%cc%' +select * from root.sg.d1 where value like '%cc%'; ``` 查询 `root.sg.d1` 下 `value` 中间为 `'b'`、前后为任意单个字符的数据 ```sql -IoTDB> select * from root.sg.device where value like '_b_' +select * from root.sg.device where value like '_b_'; ``` 查询 root.sg.d1 下 value 值为26个英文字符组成的字符串 ```sql -IoTDB> select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; ``` 查询 root.sg.d1 下 value 值为26个小写英文字符组成的字符串且时间大于100的 ```sql -IoTDB> select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; ``` ### 4.4 分段分组聚合 @@ -704,23 +629,23 @@ select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017 统计不同 database 下 status 序列的数据点个数 ```sql -select count(status) from root.** group by level = 1 +select count(status) from root.** group by level = 1; ``` 统计不同设备下 status 序列的数据点个数 ```sql -select count(status) from root.** group by level = 3 +select count(status) from root.** group by level = 3; ``` 统计不同 database 下的不同设备中 status 序列的数据点个数 ```sql -select count(status) from root.** group by level = 1, 3 +select 
count(status) from root.** group by level = 1, 3; ``` 查询所有序列下温度传感器 temperature 的最大值 ```sql -select max_value(temperature) from root.** group by level = 0 +select max_value(temperature) from root.** group by level = 0; ``` 查询某一层级下所有传感器拥有的总数据点数 ```sql -select count(*) from root.ln.** group by level = 2 +select count(*) from root.ln.** group by level = 2; ``` #### 标签分组聚合 @@ -738,19 +663,19 @@ SELECT AVG(temperature) FROM root.factory1.** GROUP BY ([1000, 10000), 5s), TAGS ``` #### 差值分段聚合 ```sql -group by variation(controlExpression[,delta][,ignoreNull=true/false]) +group by variation(controlExpression[,delta][,ignoreNull=true/false]); ``` ##### delta=0时的等值事件分段 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6); ``` 指定ignoreNull为false ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false); ``` ##### delta!=0时的差值事件分段 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4); ``` #### 条件分段聚合 ```sql @@ -758,11 +683,11 @@ group by condition(predict,[keep>/>=/=/<=/<]threshold,[,ignoreNull=true/false]) ``` 查询至少连续两行以上的charging_status=1的数据 ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true); ``` 当设置`ignoreNull`为false时,遇到null值为将其视为一个不满足条件的行,得到结果原先的分组被含null的行拆分 ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false) +select 
max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false); ``` #### 会话分段聚合 ```sql @@ -770,38 +695,35 @@ group by session(timeInterval) ``` 按照不同的时间单位设定时间间隔 ```sql -select __endTime,count(*) from root.** group by session(1d) +select __endTime,count(*) from root.** group by session(1d); ``` 和`HAVING`、`ALIGN BY DEVICE`共同使用 ```sql -select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device +select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device; ``` #### 点数分段聚合 ```sql group by count(controlExpression, size[,ignoreNull=true/false]) ``` -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5) - +```sql +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5); +``` 当使用ignoreNull将null值也考虑进来 ```sql -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false) +select count(charging_status), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false); ``` ### 4.5 聚合结果过滤 不正确的: ```sql -select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 - -select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 - -select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 - -select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 +select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; +select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; +select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; +select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; ``` SQL 示例: ```sql select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 2; - select count(s1), count(s2) from root.**
group by ([1,11),2ms) having count(s2) > 1 align by device; ``` ### 4.6 结果集补空值 @@ -834,37 +756,37 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: 基本的 `LIMIT` 子句 ```sql -select status, temperature from root.ln.wf01.wt01 limit 10 +select status, temperature from root.ln.wf01.wt01 limit 10; ``` 带 `OFFSET` 的 `LIMIT` 子句 ```sql -select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 +select status, temperature from root.ln.wf01.wt01 limit 5 offset 3; ``` `LIMIT` 子句与 `WHERE` 子句结合 ```sql -select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 5 offset 3 +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 5 offset 3; ``` `LIMIT` 子句与 `GROUP BY` 子句组合 ```sql -select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3 +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3; ``` #### 按列分页 基本的 `SLIMIT` 子句 ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1; ``` 带 `SOFFSET` 的 `SLIMIT` 子句 ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1; ``` `SLIMIT` 子句与 `GROUP BY` 子句结合 ```sql -select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1 +select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1; ``` `SLIMIT` 子句与 `LIMIT` 子句结合 
```sql -select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 +select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0; ``` ### 4.8 排序 @@ -886,7 +808,7 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` 对聚合后的结果进行排序 ```sql -select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` ### 4.9 查询对齐模式 @@ -899,50 +821,36 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; #### 整体描述 ```sql selectIntoStatement - -​ : SELECT - -​ resultColumn [, resultColumn] ... - -​ INTO intoItem [, intoItem] ... - -​ FROM prefixPath [, prefixPath] ... - -​ [WHERE whereCondition] - -​ [GROUP BY groupByTimeClause, groupByLevelClause] - -​ [FILL ({PREVIOUS | LINEAR | constant} (, interval=DURATION_LITERAL)?)] - -​ [LIMIT rowLimit OFFSET rowOffset] - -​ [ALIGN BY DEVICE] - -​ ; - - + : SELECT + resultColumn [, resultColumn] ... + INTO intoItem [, intoItem] ... + FROM prefixPath [, prefixPath] ... 
+ [WHERE whereCondition] + [GROUP BY groupByTimeClause, groupByLevelClause] + [FILL ({PREVIOUS | LINEAR | constant} (, interval=DURATION_LITERAL)?)] + [LIMIT rowLimit OFFSET rowOffset] + [ALIGN BY DEVICE] + ; intoItem - -​ : [ALIGNED] intoDevicePath '(' intoMeasurementName [',' intoMeasurementName]* ')' - -​ ; + : [ALIGNED] intoDevicePath '(' intoMeasurementName [',' intoMeasurementName]* ')' + ; ``` 按时间对齐,将 `root.sg` database 下四条序列的查询结果写入到 `root.sg_copy` database 下指定的四条序列中 ```sql -IoTDB> select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; ``` 按时间对齐,将聚合查询的结果存储到指定序列中 ```sql -IoTDB> select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); ``` 按设备对齐 ```sql -IoTDB> select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; ``` 按设备对齐,将表达式计算的结果存储到指定序列中 ```sql -IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; ``` #### 使用变量占位符 @@ -950,21 +858,15 @@ IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) fr ###### 目标设备不使用变量占位符 & 目标物理量列表使用变量占位符 ```sql - select s1, s2 - into root.sg_copy.d1(::), root.sg_copy.d2(s1), root.sg_copy.d1(${3}), root.sg_copy.d2(::) - from root.sg.d1, root.sg.d2; ``` 该语句等价于: ```sql - select s1, s2 - into root.sg_copy.d1(s1), root.sg_copy.d2(s1), root.sg_copy.d1(s2), root.sg_copy.d2(s2) - 
from root.sg.d1, root.sg.d2; ``` @@ -972,9 +874,7 @@ from root.sg.d1, root.sg.d2; ```sql select d1.s1, d1.s2, d2.s3, d3.s4 - into ::(s1_1, s2_2), root.sg.d2_2(s3_3), root.${2}_copy.::(s4) - from root.sg; ``` @@ -988,47 +888,37 @@ select * into root.sg_bk.::(::) from root.sg.**; ###### 目标设备不使用变量占位符 & 目标物理量列表使用变量占位符 ```sql - select s1, s2, s3, s4 - into root.backup_sg.d1(s1, s2, s3, s4), root.backup_sg.d2(::), root.sg.d3(backup_${4}) - from root.sg.d1, root.sg.d2, root.sg.d3 - align by device; ``` ###### 目标设备使用变量占位符 & 目标物理量列表不使用变量占位符 ```sql - select avg(s1), sum(s2) + sum(s3), count(s4) - into root.agg_${2}.::(avg_s1, sum_s2_add_s3, count_s4) - from root.** - align by device; ``` ###### 目标设备使用变量占位符 & 目标物理量列表使用变量占位符 ```sql - select * into ::(backup_${4}) from root.sg.** align by device; ``` #### 指定目标序列为对齐序列 ```sql - select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; ``` ## 5. 运维语句 生成对应的查询计划 ```sql -explain select s1,s2 from root.sg.d1 +explain select s1,s2 from root.sg.d1; ``` 执行对应的查询语句,并获取分析结果 ```sql -explain analyze select s1,s2 from root.sg.d1 order by s1 +explain analyze select s1,s2 from root.sg.d1 order by s1; ``` ## 6. 运算符 @@ -1039,7 +929,7 @@ explain analyze select s1,s2 from root.sg.d1 order by s1 更多见文档 [Arithmetic Operators and Functions](./Operator-and-Expression.md#_1-1-算数运算符) ```sql -select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1 +select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1; ``` ### 6.2 比较运算符 @@ -1047,27 +937,27 @@ select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root 更多见文档[Comparison Operators and Functions](./Operator-and-Expression.md#_1-2-比较运算符) ```sql -# Basic comparison operators +-- Basic comparison operators; select a, b, a > 10, a <= b, !(a <= b), a > 10 && a > b from root.test; -# `BETWEEN ... AND ...` operator +-- `BETWEEN ... 
AND ...` operator; select temperature from root.sg1.d1 where temperature between 36.5 and 40; select temperature from root.sg1.d1 where temperature not between 36.5 and 40; -# Fuzzy matching operator: Use `Like` for fuzzy matching -select * from root.sg.d1 where value like '%cc%' -select * from root.sg.device where value like '_b_' +-- Fuzzy matching operator: Use `Like` for fuzzy matching; +select * from root.sg.d1 where value like '%cc%'; +select * from root.sg.device where value like '_b_'; -# Fuzzy matching operator: Use `Regexp` for fuzzy matching -select * from root.sg.d1 where value regexp '^[A-Za-z]+$' -select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +-- Fuzzy matching operator: Use `Regexp` for fuzzy matching; +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; select b, b like '1%', b regexp '[0-2]' from root.test; -# `IS NULL` operator +-- `IS NULL` operator; select code from root.sg1.d1 where temperature is null; select code from root.sg1.d1 where temperature is not null; -# `IN` operator +-- `IN` operator; select code from root.sg1.d1 where code in ('200', '300', '400', '500'); select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); select a, a in (1, 2) from root.test; @@ -1123,25 +1013,25 @@ select ts, in_range(ts, 'lower'='2', 'upper'='3.1') from root.test; ```sql select s1, string_contains(s1, 's'='warn') from root.sg1.d4; select s1, string_matches(s1, 'regex'='[^\\s]+37229') from root.sg1.d4; -select s1, length(s1) from root.sg1.d1 -select s1, locate(s1, "target"="1") from root.sg1.d1 -select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1 -select s1, startswith(s1, "target"="1") from root.sg1.d1 -select s1, endswith(s1, "target"="1") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", 
"series_behind"="true") from root.sg1.d1 -select s1, substring(s1 from 1 for 2) from root.sg1.d1 -select s1, replace(s1, 'es', 'tt') from root.sg1.d1 -select s1, upper(s1) from root.sg1.d1 -select s1, lower(s1) from root.sg1.d1 -select s3, trim(s3) from root.sg1.d1 -select s1, s2, strcmp(s1, s2) from root.sg1.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1 -select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1 -select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 +select s1, length(s1) from root.sg1.d1; +select s1, locate(s1, "target"="1") from root.sg1.d1; +select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1; +select s1, startswith(s1, "target"="1") from root.sg1.d1; +select s1, endswith(s1, "target"="1") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", "series_behind"="true") from root.sg1.d1; +select s1, substring(s1 from 1 for 2) from root.sg1.d1; +select s1, replace(s1, 'es', 'tt') from root.sg1.d1; +select s1, upper(s1) from root.sg1.d1; +select s1, lower(s1) from root.sg1.d1; +select s3, trim(s3) from root.sg1.d1; +select s1, s2, strcmp(s1, s2) from root.sg1.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1; +select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1; +select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1; +select 
regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1; +select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1; ``` ### 7.5 数据类型转换函数 @@ -1149,7 +1039,7 @@ select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 更多见文档[Data Type Conversion Function](./Operator-and-Expression.md#_2-5-数据类型转换函数) ```sql -SELECT cast(s1 as INT32) from root.sg +SELECT cast(s1 as INT32) from root.sg; ``` ### 7.6 常序列生成函数 @@ -1197,8 +1087,8 @@ select equal_size_bucket_agg_sample(temperature, 'type'='avg','proportion'='0.1' select equal_size_bucket_m4_sample(temperature, 'proportion'='0.1') as M4_sample from root.ln.wf01.wt01; select equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='avg', 'number'='2') as outlier_avg_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='stendis', 'number'='2') as outlier_stendis_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='cos', 'number'='2') as outlier_cos_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='prenextdis', 'number'='2') as outlier_prenextdis_sample from root.ln.wf01.wt01; -select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1 -select M4(s1,'windowSize'='10') from root.vehicle.d1 +select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1; +select M4(s1,'windowSize'='10') from root.vehicle.d1; ``` ### 7.11 时间序列处理函数 @@ -1206,7 +1096,7 @@ select M4(s1,'windowSize'='10') from root.vehicle.d1 更多见文档[Time-Series](./Operator-and-Expression.md#_2-11-时间序列处理函数) ```sql -select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1 +select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1; ``` ## 8. 
数据质量函数库 @@ -1218,24 +1108,24 @@ select change_points(s1), change_points(s2), change_points(s3), change_points(s4 更多见文档[Data-Quality](../SQL-Manual/UDF-Libraries.md#数据质量) ```sql -# Completeness -select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Completeness; +select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Consistency -select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Consistency; +select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Timeliness -select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Timeliness; +select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Validity -select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Validity; +select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Accuracy -select Accuracy(t1,t2,t3,m1,m2,m3) from root.test +# Accuracy; +select Accuracy(t1,t2,t3,m1,m2,m3) from root.test; ``` ### 8.2 数据画像 @@ -1243,79 +1133,79 @@ select Accuracy(t1,t2,t3,m1,m2,m3) from root.test 更多见文档[Data-Profiling](../SQL-Manual/UDF-Libraries.md#数据画像) ```sql -# ACF -select acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05 +# ACF; +select 
acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05; -# Distinct -select distinct(s2) from root.test.d2 +# Distinct; +select distinct(s2) from root.test.d2; -# Histogram -select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1 +# Histogram; +select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1; -# Integral -select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 -select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10 +# Integral; +select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; +select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10; -# IntegralAvg -select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 +# IntegralAvg; +select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; -# Mad -select mad(s0) from root.test -select mad(s0, "error"="0.01") from root.test +# Mad; +select mad(s0) from root.test; +select mad(s0, "error"="0.01") from root.test; -# Median -select median(s0, "error"="0.01") from root.test +# Median; +select median(s0, "error"="0.01") from root.test; -# MinMax -select minmax(s1) from root.test +# MinMax; +select minmax(s1) from root.test; -# Mode -select mode(s2) from root.test.d2 +# Mode; +select mode(s2) from root.test.d2; -# MvAvg -select mvavg(s1, "window"="3") from root.test +# MvAvg; +select mvavg(s1, "window"="3") from root.test; -# PACF -select pacf(s1, "lag"="5") from root.test +# PACF; +select pacf(s1, "lag"="5") from root.test; -# Percentile -select percentile(s0, "rank"="0.2", "error"="0.01") from root.test +# Percentile; +select percentile(s0, "rank"="0.2", "error"="0.01") from root.test; -# Quantile -select quantile(s0, "rank"="0.2", "K"="800") from root.test +# Quantile; +select quantile(s0, "rank"="0.2", "K"="800") from root.test; -# Period -select period(s1) from root.test.d3 +# Period; +select period(s1) from root.test.d3; -# QLB -select QLB(s1) from 
root.test.d1 +# QLB; +select QLB(s1) from root.test.d1; -# Resample -select resample(s1,'every'='5m','interp'='linear') from root.test.d1 -select resample(s1,'every'='30m','aggr'='first') from root.test.d1 -select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1 +# Resample; +select resample(s1,'every'='5m','interp'='linear') from root.test.d1; +select resample(s1,'every'='30m','aggr'='first') from root.test.d1; +select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1; -# Sample -select sample(s1,'method'='reservoir','k'='5') from root.test.d1 -select sample(s1,'method'='isometric','k'='5') from root.test.d1 +# Sample; +select sample(s1,'method'='reservoir','k'='5') from root.test.d1; +select sample(s1,'method'='isometric','k'='5') from root.test.d1; -# Segment -select segment(s1, "error"="0.1") from root.test +# Segment; +select segment(s1, "error"="0.1") from root.test; -# Skew -select skew(s1) from root.test.d1 +# Skew; +select skew(s1) from root.test.d1; -# Spline -select spline(s1, "points"="151") from root.test +# Spline; +select spline(s1, "points"="151") from root.test; -# Spread -select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 +# Spread; +select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; -# Stddev -select stddev(s1) from root.test.d1 +# Stddev; +select stddev(s1) from root.test.d1; -# ZScore -select zscore(s1) from root.test +# ZScore; +select zscore(s1) from root.test; ``` ### 8.3 异常检测 @@ -1323,34 +1213,34 @@ select zscore(s1) from root.test 更多见文档[Anomaly-Detection](../SQL-Manual/UDF-Libraries.md#异常检测) ```sql -# IQR -select iqr(s1) from root.test +# IQR; +select iqr(s1) from root.test; -# KSigma -select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +# KSigma; +select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30; -# LOF -select lof(s1,s2) from root.test.d1 where time<1000 -select lof(s1, "method"="series") 
from root.test.d1 where time<1000 +# LOF; +select lof(s1,s2) from root.test.d1 where time<1000; +select lof(s1, "method"="series") from root.test.d1 where time<1000; -# MissDetect -select missdetect(s2,'minlen'='10') from root.test.d2 +# MissDetect; +select missdetect(s2,'minlen'='10') from root.test.d2; -# Range -select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +# Range; +select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30; -# TwoSidedFilter -select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test +# TwoSidedFilter; +select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test; -# Outlier -select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test +# Outlier; +select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test; -# MasterTrain -select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test +# MasterTrain; +select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test; -# MasterDetect -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test +# MasterDetect; +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test; +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test; ``` ### 8.4 频域分析 @@ -1358,31 +1248,31 @@ select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3 更多见文档[Frequency-Domain](../SQL-Manual/UDF-Libraries.md#频域分析) ```sql -# Conv -select conv(s1,s2) from root.test.d2 +# Conv; +select conv(s1,s2) from root.test.d2; -# Deconv -select deconv(s3,s2) from root.test.d2 -select deconv(s3,s2,'result'='remainder') from root.test.d2 +# Deconv; +select deconv(s3,s2) from root.test.d2; 
+select deconv(s3,s2,'result'='remainder') from root.test.d2; -# DWT -select dwt(s1,"method"="haar") from root.test.d1 +# DWT; +select dwt(s1,"method"="haar") from root.test.d1; -# FFT -select fft(s1) from root.test.d1 -select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1 +# FFT; +select fft(s1) from root.test.d1; +select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1; -# HighPass -select highpass(s1,'wpass'='0.45') from root.test.d1 +# HighPass; +select highpass(s1,'wpass'='0.45') from root.test.d1; -# IFFT -select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1 +# IFFT; +select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1; -# LowPass -select lowpass(s1,'wpass'='0.45') from root.test.d1 +# LowPass; +select lowpass(s1,'wpass'='0.45') from root.test.d1; -# Envelope -select envelope(s1) from root.test.d1 +# Envelope; +select envelope(s1) from root.test.d1; ``` ### 8.5 数据匹配 @@ -1390,20 +1280,20 @@ select envelope(s1) from root.test.d1 更多见文档[Data-Matching](../SQL-Manual/UDF-Libraries.md#数据匹配) ```sql -# Cov -select cov(s1,s2) from root.test.d2 +# Cov; +select cov(s1,s2) from root.test.d2; -# DTW -select dtw(s1,s2) from root.test.d2 +# DTW; +select dtw(s1,s2) from root.test.d2; -# Pearson -select pearson(s1,s2) from root.test.d2 +# Pearson; +select pearson(s1,s2) from root.test.d2; -# PtnSym -select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1 +# PtnSym; +select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1; -# XCorr -select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 +# XCorr; +select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05; ``` ### 8.6 数据修复 @@ -1411,24 +1301,24 @@ select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 更多见文档[Data-Repairing](../SQL-Manual/UDF-Libraries.md#数据修复) ```sql -# 
TimestampRepair -select timestamprepair(s1,'interval'='10000') from root.test.d2 -select timestamprepair(s1) from root.test.d2 +# TimestampRepair; +select timestamprepair(s1,'interval'='10000') from root.test.d2; +select timestamprepair(s1) from root.test.d2; -# ValueFill -select valuefill(s1) from root.test.d2 -select valuefill(s1,"method"="previous") from root.test.d2 +# ValueFill; +select valuefill(s1) from root.test.d2; +select valuefill(s1,"method"="previous") from root.test.d2; -# ValueRepair -select valuerepair(s1) from root.test.d2 -select valuerepair(s1,'method'='LsGreedy') from root.test.d2 +# ValueRepair; +select valuerepair(s1) from root.test.d2; +select valuerepair(s1,'method'='LsGreedy') from root.test.d2; -# MasterRepair -select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test +# MasterRepair; +select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test; -# SeasonalRepair -select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2 -select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 +# SeasonalRepair; +select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2; +select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2; ``` ### 8.7 序列发现 @@ -1436,12 +1326,12 @@ select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 更多见文档[Series-Discovery](../SQL-Manual/UDF-Libraries.md#序列发现) ```sql -# ConsecutiveSequences -select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1 -select consecutivesequences(s1,s2) from root.test.d1 +# ConsecutiveSequences; +select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1; +select consecutivesequences(s1,s2) from root.test.d1; -# ConsecutiveWindows -select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 +# ConsecutiveWindows; +select consecutivewindows(s1,s2,'length'='10m') from root.test.d1; ``` ### 8.8 机器学习 @@ -1449,14 +1339,14 @@ select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 
更多见文档[Machine-Learning](../SQL-Manual/UDF-Libraries.md#机器学习) ```sql -# AR -select ar(s0,"p"="2") from root.test.d0 +# AR; +select ar(s0,"p"="2") from root.test.d0; -# Representation -select representation(s0,"tb"="3","vb"="2") from root.test.d0 +# Representation; +select representation(s0,"tb"="3","vb"="2") from root.test.d0; -# RM -select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0 +# RM; +select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0; ``` ## 9. 条件表达式 @@ -1469,24 +1359,24 @@ when 1000=1050 then "bad temperature" when P<=1000000 or P>=1100000 then "bad pressure" end as `result` -from root.test1 +from root.test1; select str, case when str like "%cc%" then "has cc" when str like "%dd%" then "has dd" else "no cc and dd" end as `result` -from root.test2 +from root.test2; select count(case when x<=1 then 1 end) as `(-∞,1]`, count(case when 1 SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 配置连续查询的时间窗口大小 ```sql CREATE CONTINUOUS QUERY cq2 - RESAMPLE RANGE 40s - BEGIN - SELECT max_value(temperature) - INTO root.ln.wf02.wt02(temperature_max), root.ln.wf02.wt01(temperature_max), root.ln.wf01.wt02(temperature_max), root.ln.wf01.wt01(temperature_max) - FROM root.ln.*.* - GROUP BY(10s) +END; -END - - -\> SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 同时配置连续查询执行的周期性间隔和时间窗口大小 ```sql CREATE CONTINUOUS QUERY cq3 - RESAMPLE EVERY 20s RANGE 40s - BEGIN - SELECT max_value(temperature) - INTO root.ln.wf02.wt02(temperature_max), root.ln.wf02.wt01(temperature_max), root.ln.wf01.wt02(temperature_max), root.ln.wf01.wt01(temperature_max) - FROM root.ln.*.* - GROUP BY(10s) - FILL(100.0) +END; -END - - - -\> SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 配置连续查询每次查询执行时间窗口的结束时间 ```sql CREATE CONTINUOUS QUERY cq4 - RESAMPLE EVERY 20s RANGE 40s, 20s - BEGIN - SELECT max_value(temperature) - INTO root.ln.wf02.wt02(temperature_max), 
root.ln.wf02.wt01(temperature_max), root.ln.wf01.wt02(temperature_max), root.ln.wf01.wt01(temperature_max) - FROM root.ln.*.* - GROUP BY(10s) - FILL(100.0) +END; -END - - - -\> SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 没有GROUP BY TIME子句的连续查询 ```sql CREATE CONTINUOUS QUERY cq5 - RESAMPLE EVERY 20s - BEGIN - SELECT temperature + 1 - INTO root.precalculated_sg.::(temperature) - FROM root.ln.*.* - align by device +END; -END - - - -\> SELECT temperature from root.precalculated_sg.*.* align by device; +SELECT temperature from root.precalculated_sg.*.* align by device; ``` ### 11.2 连续查询的管理 @@ -1761,18 +1567,12 @@ DROP CONTINUOUS QUERY s1_count_cq; 1. 创建一个连续查询 ```sql CREATE CQ s1_count_cq - BEGIN - -​ SELECT count(s1) - -​ INTO root.sg_count.d.count_s1 - -​ FROM root.sg.d - -​ GROUP BY(30m) - -END + SELECT count(s1) + INTO root.sg_count.d.count_s1 + FROM root.sg.d + GROUP BY(30m) +END; ``` 1. 查询连续查询的结果 ```sql @@ -1792,11 +1592,11 @@ CREATE FUNCTION AS (USING URI URI-STRING)? 
#### 不指定URI ```sql -CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' +CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample'; ``` #### 指定URI ```sql -CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' USING URI 'http://jar/example.jar' +CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' USING URI 'http://jar/example.jar'; ``` ### 12.3 UDF 卸载 @@ -1804,7 +1604,7 @@ CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' USING URI 'http:// DROP FUNCTION ``` ```sql -DROP FUNCTION example +DROP FUNCTION example; ``` ### 12.4 UDF 查询 @@ -1818,16 +1618,13 @@ SELECT example(s1, s2, 'key1'='value1', 'key2'='value2') FROM root.sg.d1; #### 与其他查询的嵌套查询 ```sql SELECT s1, s2, example(s1, s2) FROM root.sg.d1; - SELECT *, example(*) FROM root.sg.d1 DISABLE ALIGN; - SELECT s1 * example(* / s1 + s2) FROM root.sg.d1; - SELECT s1, s2, s1 + example(s1, s2), s1 - example(s1 + example(s1, s2) / s2) FROM root.sg.d1; ``` ### 12.5 查看所有注册的 UDF ```sql -SHOW FUNCTIONS +SHOW FUNCTIONS; ``` ## 13. 
权限管理 @@ -1835,68 +1632,63 @@ SHOW FUNCTIONS - 创建用户(需 MANAGE_USER 权限) - ```SQL -CREATE USER -eg: CREATE USER user1 'passwd' +CREATE USER ; +eg: CREATE USER user1 'passwd'; ``` - 删除用户 (需 MANEGE_USER 权限) - ```SQL -DROP USER -eg: DROP USER user1 +DROP USER ; +eg: DROP USER user1; ``` - 创建角色 (需 MANAGE_ROLE 权限) ```SQL -CREATE ROLE -eg: CREATE ROLE role1 +CREATE ROLE ; +eg: CREATE ROLE role1; ``` - 删除角色 (需 MANAGE_ROLE 权限) - ```SQL -DROP ROLE -eg: DROP ROLE role1 +DROP ROLE ; +eg: DROP ROLE role1; ``` - 赋予用户角色 (需 MANAGE_ROLE 权限) - ```SQL -GRANT ROLE TO -eg: GRANT ROLE admin TO user1 +GRANT ROLE TO ; +eg: GRANT ROLE admin TO user1; ``` - 移除用户角色 (需 MANAGE_ROLE 权限) - ```SQL -REVOKE ROLE FROM -eg: REVOKE ROLE admin FROM user1 +REVOKE ROLE FROM ; +eg: REVOKE ROLE admin FROM user1; ``` - 列出所有用户 (需 MANEGE_USER 权限) ```SQL -LIST USER +LIST USER; ``` - 列出所有角色 (需 MANAGE_ROLE 权限) ```SQL -LIST ROLE +LIST ROLE; ``` - 列出指定角色下所有用户 (需 MANEGE_USER 权限) ```SQL -LIST USER OF ROLE -eg: LIST USER OF ROLE roleuser +LIST USER OF ROLE ; +eg: LIST USER OF ROLE roleuser; ``` - 列出指定用户下所有角色 @@ -1904,8 +1696,8 @@ eg: LIST USER OF ROLE roleuser 用户可以列出自己的角色,但列出其他用户的角色需要拥有 MANAGE_ROLE 权限。 ```SQL -LIST ROLE OF USER -eg: LIST ROLE OF USER tempuser +LIST ROLE OF USER ; +eg: LIST ROLE OF USER tempuser; ``` - 列出用户所有权限 @@ -1915,7 +1707,6 @@ eg: LIST ROLE OF USER tempuser ```SQL LIST PRIVILEGES OF USER ; eg: LIST PRIVILEGES OF USER tempuser; - ``` - 列出角色所有权限 diff --git a/src/zh/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md b/src/zh/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md index 2e42cd67f..44e4a6f00 100644 --- a/src/zh/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md +++ b/src/zh/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md @@ -7,32 +7,32 @@ #### 创建数据库 ```sql -CREATE DATABASE root.ln +CREATE DATABASE root.ln; ``` #### 查看数据库 ```sql -show databases -show databases root.* -show databases root.** +show databases; +show databases root.*; +show databases root.**; ``` #### 删除数据库 ```sql -DELETE DATABASE 
root.ln -DELETE DATABASE root.sgcc -DELETE DATABASE root.** +DELETE DATABASE root.ln; +DELETE DATABASE root.sgcc; +DELETE DATABASE root.**; ``` #### 统计数据库数量 ```sql -count databases -count databases root.* -count databases root.sgcc.* -count databases root.sgcc +count databases; +count databases root.*; +count databases root.sgcc.*; +count databases root.sgcc; ``` ### 1.2 时间序列管理 @@ -40,119 +40,119 @@ count databases root.sgcc #### 创建时间序列 ```sql -create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN -create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT -create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT -create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN -create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN -create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT +create timeseries root.ln.wf01.wt01.status with datatype=BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature with datatype=FLOAT; +create timeseries root.ln.wf02.wt02.hardware with datatype=TEXT; +create timeseries root.ln.wf02.wt02.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status with datatype=BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature with datatype=FLOAT; ``` - 简化版 ```sql -create timeseries root.ln.wf01.wt01.status BOOLEAN -create timeseries root.ln.wf01.wt01.temperature FLOAT -create timeseries root.ln.wf02.wt02.hardware TEXT -create timeseries root.ln.wf02.wt02.status BOOLEAN -create timeseries root.sgcc.wf03.wt01.status BOOLEAN -create timeseries root.sgcc.wf03.wt01.temperature FLOAT +create timeseries root.ln.wf01.wt01.status BOOLEAN; +create timeseries root.ln.wf01.wt01.temperature FLOAT; +create timeseries root.ln.wf02.wt02.hardware TEXT; +create timeseries root.ln.wf02.wt02.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.status BOOLEAN; +create timeseries root.sgcc.wf03.wt01.temperature FLOAT; ``` - 错误提示 ```sql -create timeseries root.ln.wf02.wt02.status WITH 
DATATYPE=BOOLEAN -> error: encoding TS_2DIFF does not support BOOLEAN +create timeseries root.ln.wf02.wt02.status WITH DATATYPE=BOOLEAN; +> error: encoding TS_2DIFF does not support BOOLEAN; ``` #### 创建对齐时间序列 ```sql -CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT) +CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ``` #### 删除时间序列 ```sql -delete timeseries root.ln.wf01.wt01.status -delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware -delete timeseries root.ln.wf02.* -drop timeseries root.ln.wf02.* +delete timeseries root.ln.wf01.wt01.status; +delete timeseries root.ln.wf01.wt01.temperature, root.ln.wf02.wt02.hardware; +delete timeseries root.ln.wf02.*; +drop timeseries root.ln.wf02.*; ``` #### 查看时间序列 ```sql -SHOW TIMESERIES -SHOW TIMESERIES -SHOW TIMESERIES root.** -SHOW TIMESERIES root.ln.** -SHOW TIMESERIES root.ln.** limit 10 offset 10 -SHOW TIMESERIES root.ln.** where timeseries contains 'wf01.wt' -SHOW TIMESERIES root.ln.** where dataType=FLOAT +SHOW TIMESERIES; +SHOW TIMESERIES ; +SHOW TIMESERIES root.**; +SHOW TIMESERIES root.ln.**; +SHOW TIMESERIES root.ln.** limit 10 offset 10; +SHOW TIMESERIES root.ln.** where timeseries contains 'wf01.wt'; +SHOW TIMESERIES root.ln.** where dataType=FLOAT; SHOW TIMESERIES root.ln.** where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -SHOW LATEST TIMESERIES +SHOW LATEST TIMESERIES; ``` #### 统计时间序列数量 ```sql -COUNT TIMESERIES root.** -COUNT TIMESERIES root.ln.** -COUNT TIMESERIES root.ln.*.*.status -COUNT TIMESERIES root.ln.wf01.wt01.status -COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' -COUNT TIMESERIES root.** WHERE DATATYPE = INT64 -COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c' -COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c' -COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1 +COUNT TIMESERIES root.**; +COUNT TIMESERIES root.ln.**; +COUNT TIMESERIES root.ln.*.*.status; +COUNT TIMESERIES 
root.ln.wf01.wt01.status; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc'; +COUNT TIMESERIES root.** WHERE DATATYPE = INT64; +COUNT TIMESERIES root.** WHERE TAGS(unit) contains 'c'; +COUNT TIMESERIES root.** WHERE TAGS(unit) = 'c'; +COUNT TIMESERIES root.** WHERE TIMESERIES contains 'sgcc' group by level = 1; COUNT TIMESERIES root.** WHERE time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -COUNT TIMESERIES root.** GROUP BY LEVEL=1 -COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2 -COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2 +COUNT TIMESERIES root.** GROUP BY LEVEL=1; +COUNT TIMESERIES root.ln.** GROUP BY LEVEL=2; +COUNT TIMESERIES root.ln.wf01.* GROUP BY LEVEL=2; ``` #### 标签点管理 ```sql -create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2) +create timeseries root.turbine.d1.s1(temprature) with datatype=FLOAT tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2); ``` - 重命名标签或属性 ```sql -ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1; ``` - 重新设置标签或属性的值 ```sql -ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1; ``` - 删除已经存在的标签或属性 ```sql -ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2; ``` - 添加新的标签 ```sql -ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4; ``` - 添加新的属性 ```sql -ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4; ``` - 更新插入别名,标签和属性 ```sql -ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4) +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4); ``` - 使用标签作为过滤条件查询时间序列 @@ 
-164,37 +164,37 @@ SHOW TIMESERIES (<`PathPattern`>)? timeseriesWhereClause 返回给定路径的下的所有满足条件的时间序列信息: ```sql -ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c -ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1 -show timeseries root.ln.** where TAGS(unit)='c' -show timeseries root.ln.** where TAGS(description) contains 'test1' +ALTER timeseries root.ln.wf02.wt02.hardware ADD TAGS unit=c; +ALTER timeseries root.ln.wf02.wt02.status ADD TAGS description=test1; +show timeseries root.ln.** where TAGS(unit)='c'; +show timeseries root.ln.** where TAGS(description) contains 'test1'; ``` - 使用标签作为过滤条件统计时间序列数量 ```sql -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause -COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL= +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause; +COUNT TIMESERIES (<`PathPattern`>)? timeseriesWhereClause GROUP BY LEVEL=; ``` 返回给定路径的下的所有满足条件的时间序列的数量: ```sql -count timeseries -count timeseries root.** where TAGS(unit)='c' -count timeseries root.** where TAGS(unit)='c' group by level = 2 +count timeseries; +count timeseries root.** where TAGS(unit)='c'; +count timeseries root.** where TAGS(unit)='c' group by level = 2; ``` 创建对齐时间序列: ```sql -create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)) +create aligned timeseries root.sg1.d1(s1 INT32 tags(tag1=v1, tag2=v2) attributes(attr1=v1, attr2=v2), s2 DOUBLE tags(tag3=v3, tag4=v4) attributes(attr3=v3, attr4=v4)); ``` 支持查询: ```sql -show timeseries where TAGS(tag1)='v1' +show timeseries where TAGS(tag1)='v1'; ``` ### 1.3 时间序列路径管理 @@ -202,121 +202,93 @@ show timeseries where TAGS(tag1)='v1' #### 查看路径的所有子路径 ```sql -SHOW CHILD PATHS pathPattern -- 查询 root.ln 的下一层:show child paths root.ln -- 查询形如 root.xx.xx.xx 的路径:show child paths root.*.* +SHOW CHILD PATHS pathPattern; +- 查询 root.ln 的下一层; +show child paths root.ln; +- 查询形如 root.xx.xx.xx 
的路径; +show child paths root.*.*; ``` #### 查看路径的所有子节点 ```sql -SHOW CHILD NODES pathPattern - -- 查询 root 的下一层:show child nodes root -- 查询 root.ln 的下一层 :show child nodes root.ln +SHOW CHILD NODES pathPattern; +- 查询 root 的下一层; +show child nodes root; +- 查询 root.ln 的下一层; +show child nodes root.ln; ``` #### 查看设备 ```sql -IoTDB> show devices - -IoTDB> show devices root.ln.** - -IoTDB> show devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; +show devices; +show devices root.ln.**; +show devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; ``` ##### 查看设备及其 database 信息 ```sql -IoTDB> show devices with database - -IoTDB> show devices root.ln.** with database +show devices with database; +show devices root.ln.** with database; ``` #### 统计节点数 ```sql -IoTDB > COUNT NODES root.** LEVEL=2 - -IoTDB > COUNT NODES root.ln.** LEVEL=2 - -IoTDB > COUNT NODES root.ln.wf01.* LEVEL=3 - -IoTDB > COUNT NODES root.**.temperature LEVEL=3 +COUNT NODES root.** LEVEL=2; +COUNT NODES root.ln.** LEVEL=2; +COUNT NODES root.ln.wf01.* LEVEL=3; +COUNT NODES root.**.temperature LEVEL=3; ``` #### 统计设备数量 ```sql - -IoTDB> count devices - -IoTDB> count devices root.ln.** - -IoTDB> count devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; +count devices; +count devices root.ln.**; +count devices where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; ``` ### 1.4 数据存活时间管理 #### 设置 TTL ```sql -IoTDB> set ttl to root.ln 3600000 -``` -```sql -IoTDB> set ttl to root.sgcc.** 3600000 -``` -```sql -IoTDB> set ttl to root.** 3600000 +set ttl to root.ln 3600000; +set ttl to root.sgcc.** 3600000; +set ttl to root.** 3600000; ``` #### 取消 TTL ```sql -IoTDB> unset ttl from root.ln -``` -```sql -IoTDB> unset ttl from root.sgcc.** -``` -```sql -IoTDB> unset ttl from root.** +unset ttl from root.ln; +unset ttl from root.sgcc.**; +unset ttl from root.**; ``` #### 显示 TTL ```sql -IoTDB> SHOW ALL TTL -``` -```sql -IoTDB> SHOW TTL ON pathPattern -``` -```sql -IoTDB> 
show DEVICES +SHOW ALL TTL; +SHOW TTL ON pathPattern; +show DEVICES; ``` ## 2. 写入数据 ### 2.1 写入单列数据 ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp,status) values(1,true) -``` -```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1'),(2, 'v1') +insert into root.ln.wf02.wt02(timestamp,status) values(1,true); +insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1'),(2, 'v1'); ``` ### 2.2 写入多列数据 ```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) values (2, false, 'v2') -``` -```sql -IoTDB > insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4') +insert into root.ln.wf02.wt02(timestamp, status, hardware) values (2, false, 'v2'); +insert into root.ln.wf02.wt02(timestamp, status, hardware) VALUES (3, false, 'v3'),(4, true, 'v4'); ``` ### 2.3 使用服务器时间戳 ```sql -IoTDB > insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2') +insert into root.ln.wf02.wt02(status, hardware) values (false, 'v2'); ``` ### 2.4 写入对齐时间序列数据 ```sql -IoTDB > create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE) -``` -```sql -IoTDB > insert into root.sg1.d1(timestamp, s1, s2) aligned values(1, 1, 1) -``` -```sql -IoTDB > insert into root.sg1.d1(timestamp, s1, s2) aligned values(2, 2, 2), (3, 3, 3) -``` -```sql -IoTDB > select * from root.sg1.d1 +create aligned timeseries root.sg1.d1(s1 INT32, s2 DOUBLE); +insert into root.sg1.d1(timestamp, s1, s2) aligned values(1, 1, 1); +insert into root.sg1.d1(timestamp, s1, s2) aligned values(2, 2, 2), (3, 3, 3); +select * from root.sg1.d1; ``` ### 2.5 加载 TsFile 文件数据 @@ -342,43 +314,24 @@ load '' [sglevel=int][onSuccess=delete/none] ### 3.1 删除单列数据 ```sql delete from root.ln.wf02.wt02.status where time<=2017-11-01T16:26:00; -``` -```sql delete from root.ln.wf02.wt02.status where time>=2017-01-01T00:00:00 and time<=2017-11-01T16:26:00; -``` -```sql -delete from root.ln.wf02.wt02.status where time < 10 -``` -```sql -delete from 
root.ln.wf02.wt02.status where time <= 10 -``` -```sql -delete from root.ln.wf02.wt02.status where time < 20 and time > 10 -``` -```sql -delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10 -``` -```sql -delete from root.ln.wf02.wt02.status where time > 20 -``` -```sql -delete from root.ln.wf02.wt02.status where time >= 20 -``` -```sql -delete from root.ln.wf02.wt02.status where time = 20 +delete from root.ln.wf02.wt02.status where time < 10; +delete from root.ln.wf02.wt02.status where time <= 10; +delete from root.ln.wf02.wt02.status where time < 20 and time > 10; +delete from root.ln.wf02.wt02.status where time <= 20 and time >= 10; +delete from root.ln.wf02.wt02.status where time > 20; +delete from root.ln.wf02.wt02.status where time >= 20; +delete from root.ln.wf02.wt02.status where time = 20; ``` 出错: ```sql -delete from root.ln.wf02.wt02.status where time > 4 or time < 0 - -Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic - -expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' +delete from root.ln.wf02.wt02.status where time > 4 or time < 0; +Msg: 303: Check metadata error: For delete statement, where clause can only contain atomic expressions like : time > XXX, time <= XXX, or two atomic expressions connected by 'AND' ``` 删除时间序列中的所有数据: ```sql -delete from root.ln.wf02.wt02.status +delete from root.ln.wf02.wt02.status; ``` ### 3.2 删除多列数据 ```sql @@ -386,8 +339,7 @@ delete from root.ln.wf02.wt02.* where time <= 2017-11-01T16:26:00; ``` 声明式的编程方式: ```sql -IoTDB> delete from root.ln.wf03.wt02.status where time < now() - +delete from root.ln.wf03.wt02.status where time < now(); Msg: The statement is executed successfully. ``` ## 4. 数据查询 @@ -396,7 +348,7 @@ Msg: The statement is executed successfully. 
#### 时间过滤查询 ```sql -select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000 +select temperature from root.ln.wf01.wt01 where time < 2017-11-01T00:08:00.000; ``` #### 根据一个时间区间选择多列数据 ```sql @@ -427,9 +379,7 @@ select s1 as temperature, s2 as speed from root.ln.wf01.wt01; 不支持: ```sql select s1, count(s1) from root.sg.d1; - select sin(s1), count(s1) from root.sg.d1; - select s1, count(s1) from root.sg.d1 group by ([10,100),10ms); ``` ##### 时间序列查询嵌套表达式 @@ -437,67 +387,49 @@ select s1, count(s1) from root.sg.d1 group by ([10,100),10ms); 示例 1: ```sql select a, - -​ b, - -​ ((a + 1) * 2 - 1) % 2 + 1.5, - -​ sin(a + sin(a + sin(b))), - -​ -(a + b) * (sin(a + b) * sin(a + b) + cos(a + b) * cos(a + b)) + 1 - + b, + ((a + 1) * 2 - 1) % 2 + 1.5, + sin(a + sin(a + sin(b))), + -(a + b) * (sin(a + b) * sin(a + b) + cos(a + b) * cos(a + b)) + 1 from root.sg1; ``` 示例 2: ```sql -select (a + b) * 2 + sin(a) from root.sg - +select (a + b) * 2 + sin(a) from root.sg; +``` 示例 3: - -select (a + *) / 2 from root.sg1 - +```sql +select (a + *) / 2 from root.sg1; +``` 示例 4: - -select (a + b) * 3 from root.sg, root.ln +```sql +select (a + b) * 3 from root.sg, root.ln; ``` ##### 聚合查询嵌套表达式 示例 1: ```sql select avg(temperature), - -​ sin(avg(temperature)), - -​ avg(temperature) + 1, - -​ -sum(hardware), - -​ avg(temperature) + sum(hardware) - + sin(avg(temperature)), + avg(temperature) + 1, + -sum(hardware), + avg(temperature) + sum(hardware) from root.ln.wf01.wt01; ``` 示例 2: ```sql select avg(*), - -​ (avg(*) + 1) * 3 / 2 -1 - -from root.sg1 + (avg(*) + 1) * 3 / 2 -1 +from root.sg1; ``` 示例 3: ```sql select avg(temperature), - -​ sin(avg(temperature)), - -​ avg(temperature) + 1, - -​ -sum(hardware), - -​ avg(temperature) + sum(hardware) as custom_sum - + sin(avg(temperature)), + avg(temperature) + 1, + -sum(hardware), + avg(temperature) + sum(hardware) as custom_sum from root.ln.wf01.wt01 - GROUP BY([10, 90), 10ms); ``` #### 最新点查询 @@ -510,15 +442,15 @@ select last [COMMA ]* 
from < PrefixPath > [COMMA < PrefixPath >]* < 查询 root.ln.wf01.wt01.status 的最新数据点 ```sql -IoTDB> select last status from root.ln.wf01.wt01 +select last status from root.ln.wf01.wt01; ``` 查询 root.ln.wf01.wt01 下 status,temperature 时间戳大于等于 2017-11-07T23:50:00 的最新数据点 ```sql -IoTDB> select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00 +select last status, temperature from root.ln.wf01.wt01 where time >= 2017-11-07T23:50:00; ``` 查询 root.ln.wf01.wt01 下所有序列的最新数据点,并按照序列名降序排列 ```sql -IoTDB> select last * from root.ln.wf01.wt01 order by timeseries desc; +select last * from root.ln.wf01.wt01 order by timeseries desc; ``` ### 4.3 查询过滤条件 @@ -573,20 +505,20 @@ select code from root.sg1.d1 where temperature is not null; 查询 `root.sg.d1` 下 `value` 含有`'cc'`的数据 ```sql -IoTDB> select * from root.sg.d1 where value like '%cc%' +select * from root.sg.d1 where value like '%cc%'; ``` 查询 `root.sg.d1` 下 `value` 中间为 `'b'`、前后为任意单个字符的数据 ```sql -IoTDB> select * from root.sg.device where value like '_b_' +select * from root.sg.device where value like '_b_'; ``` 查询 root.sg.d1 下 value 值为26个英文字符组成的字符串 ```sql -IoTDB> select * from root.sg.d1 where value regexp '^[A-Za-z]+$' +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; ``` 查询 root.sg.d1 下 value 值为26个小写英文字符组成的字符串且时间大于100的 ```sql -IoTDB> select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; ``` ### 4.4 分段分组聚合 @@ -629,23 +561,23 @@ select count(status) from root.ln.wf01.wt01 group by ([2017-11-01 00:00:00, 2017 统计不同 database 下 status 序列的数据点个数 ```sql -select count(status) from root.** group by level = 1 +select count(status) from root.** group by level = 1; ``` 统计不同设备下 status 序列的数据点个数 ```sql -select count(status) from root.** group by level = 3 +select count(status) from root.** group by level = 3; ``` 统计不同 database 下的不同设备中 status 序列的数据点个数 ```sql -select count(status) from root.** group by level = 1, 3 +select 
count(status) from root.** group by level = 1, 3; ``` 查询所有序列下温度传感器 temperature 的最大值 ```sql -select max_value(temperature) from root.** group by level = 0 +select max_value(temperature) from root.** group by level = 0; ``` 查询某一层级下所有传感器拥有的总数据点数 ```sql -select count(*) from root.ln.** group by level = 2 +select count(*) from root.ln.** group by level = 2; ``` #### 标签分组聚合 @@ -667,15 +599,15 @@ group by variation(controlExpression[,delta][,ignoreNull=true/false]) ``` ##### delta=0时的等值事件分段 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6); ``` 指定ignoreNull为false ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, ignoreNull=false); ``` ##### delta!=0时的差值事件分段 ```sql -select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4) +select __endTime, avg(s1), count(s2), sum(s3) from root.sg.d group by variation(s6, 4); ``` #### 条件分段聚合 ```sql @@ -683,11 +615,11 @@ group by condition(predict,[keep>/>=/=/<=/<]threshold,[,ignoreNull=true/false]) ``` 查询至少连续两行以上的charging_status=1的数据 ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=true); ``` 当设置`ignoreNull`为false时,遇到null值为将其视为一个不满足条件的行,得到结果原先的分组被含null的行拆分 ```sql -select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false) +select max_time(charging_status),count(vehicle_status),last_value(soc) from root.** group by condition(charging_status=1,KEEP>=2,ignoreNull=false); ``` #### 会话分段聚合 ```sql @@ -695,38 +627,35 @@ 
group by session(timeInterval) ``` 按照不同的时间单位设定时间间隔 ```sql -select __endTime,count(*) from root.** group by session(1d) +select __endTime,count(*) from root.** group by session(1d); ``` 和`HAVING`、`ALIGN BY DEVICE`共同使用 ```sql -select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device +select __endTime,sum(hardware) from root.ln.wf02.wt01 group by session(50s) having sum(hardware)>0 align by device; ``` #### 点数分段聚合 ```sql group by count(controlExpression, size[,ignoreNull=true/false]) ``` -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5) - +```sql +select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5); +``` 当使用ignoreNull将null值也考虑进来 ```sql -select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false) +select count(charging_stauts), first_value(soc) from root.sg group by count(charging_status,5,ignoreNull=false); ``` ### 4.5 聚合结果过滤 不正确的: ```sql -select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1 - -select count(s1) from root.** group by ([1,3),1ms) having s1 > 1 - -select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1 - -select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1 +select count(s1) from root.** group by ([1,3),1ms) having sum(s1) > s1; +select count(s1) from root.** group by ([1,3),1ms) having s1 > 1; +select count(s1) from root.** group by ([1,3),1ms), level=1 having sum(d1.s1) > 1; +select count(d1.s1) from root.** group by ([1,3),1ms), level=1 having sum(s1) > 1; ``` SQL 示例: ```sql select count(s1) from root.** group by ([1,11),2ms), level=1 having count(s2) > 2; - select count(s1), count(s2) from root.** group by ([1,11),2ms) having count(s2) > 1 align by device; ``` ### 4.6 结果集补空值 @@ -759,37 +688,37 @@ select temperature, status from root.sgcc.wf03.wt01 where time >= 2017-11-01T16: 基本的 
`LIMIT` 子句 ```sql -select status, temperature from root.ln.wf01.wt01 limit 10 +select status, temperature from root.ln.wf01.wt01 limit 10; ``` 带 `OFFSET` 的 `LIMIT` 子句 ```sql -select status, temperature from root.ln.wf01.wt01 limit 5 offset 3 +select status, temperature from root.ln.wf01.wt01 limit 5 offset 3; ``` `LIMIT` 子句与 `WHERE` 子句结合 ```sql -select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 5 offset 3 +select status,temperature from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time< 2017-11-01T00:12:00.000 limit 5 offset 3; ``` `LIMIT` 子句与 `GROUP BY` 子句组合 ```sql -select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3 +select count(status), max_value(temperature) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) limit 4 offset 3; ``` #### 按列分页 基本的 `SLIMIT` 子句 ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1; ``` 带 `SOFFSET` 的 `SLIMIT` 子句 ```sql -select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1 +select * from root.ln.wf01.wt01 where time > 2017-11-01T00:05:00.000 and time < 2017-11-01T00:12:00.000 slimit 1 soffset 1; ``` `SLIMIT` 子句与 `GROUP BY` 子句结合 ```sql -select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1 +select max_value(*) from root.ln.wf01.wt01 group by ([2017-11-01T00:00:00, 2017-11-07T23:00:00),1d) slimit 1 soffset 1; ``` `SLIMIT` 子句与 `LIMIT` 子句结合 ```sql -select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0 +select * from root.ln.wf01.wt01 limit 10 offset 100 slimit 2 soffset 0; ``` ### 4.8 排序 @@ -811,7 +740,7 @@ 
select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; ``` 对聚合后的结果进行排序 ```sql -select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device +select count(*) from root.ln.** group by ((2017-11-01T00:00:00.000+08:00,2017-11-01T00:03:00.000+08:00],1m) order by device asc,time asc align by device; ``` ### 4.9 查询对齐模式 @@ -824,50 +753,36 @@ select * from root.ln.** where time <= 2017-11-01T00:01:00 align by device; #### 整体描述 ```sql selectIntoStatement - -​ : SELECT - -​ resultColumn [, resultColumn] ... - -​ INTO intoItem [, intoItem] ... - -​ FROM prefixPath [, prefixPath] ... - -​ [WHERE whereCondition] - -​ [GROUP BY groupByTimeClause, groupByLevelClause] - -​ [FILL ({PREVIOUS | LINEAR | constant} (, interval=DURATION_LITERAL)?)] - -​ [LIMIT rowLimit OFFSET rowOffset] - -​ [ALIGN BY DEVICE] - -​ ; - - + : SELECT + resultColumn [, resultColumn] ... + INTO intoItem [, intoItem] ... + FROM prefixPath [, prefixPath] ... 
+ [WHERE whereCondition] + [GROUP BY groupByTimeClause, groupByLevelClause] + [FILL ({PREVIOUS | LINEAR | constant} (, interval=DURATION_LITERAL)?)] + [LIMIT rowLimit OFFSET rowOffset] + [ALIGN BY DEVICE] + ; intoItem - -​ : [ALIGNED] intoDevicePath '(' intoMeasurementName [',' intoMeasurementName]* ')' - -​ ; + : [ALIGNED] intoDevicePath '(' intoMeasurementName [',' intoMeasurementName]* ')' + ; ``` 按时间对齐,将 `root.sg` database 下四条序列的查询结果写入到 `root.sg_copy` database 下指定的四条序列中 ```sql -IoTDB> select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; +select s1, s2 into root.sg_copy.d1(t1), root.sg_copy.d2(t1, t2), root.sg_copy.d1(t2) from root.sg.d1, root.sg.d2; ``` 按时间对齐,将聚合查询的结果存储到指定序列中 ```sql -IoTDB> select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); +select count(s1 + s2), last_value(s2) into root.agg.count(s1_add_s2), root.agg.last_value(s2) from root.sg.d1 group by ([0, 100), 10ms); ``` 按设备对齐 ```sql -IoTDB> select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; +select s1, s2 into root.sg_copy.d1(t1, t2), root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; ``` 按设备对齐,将表达式计算的结果存储到指定序列中 ```sql -IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; +select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) from root.sg.d1, root.sg.d2 align by device; ``` #### 使用变量占位符 @@ -875,21 +790,15 @@ IoTDB> select s1 + s2 into root.expr.add(d1s1_d1s2), root.expr.add(d2s1_d2s2) fr ###### 目标设备不使用变量占位符 & 目标物理量列表使用变量占位符 ```sql - select s1, s2 - into root.sg_copy.d1(::), root.sg_copy.d2(s1), root.sg_copy.d1(${3}), root.sg_copy.d2(::) - from root.sg.d1, root.sg.d2; ``` 该语句等价于: ```sql - select s1, s2 - into root.sg_copy.d1(s1), root.sg_copy.d2(s1), root.sg_copy.d1(s2), root.sg_copy.d2(s2) - 
from root.sg.d1, root.sg.d2; ``` @@ -897,9 +806,7 @@ from root.sg.d1, root.sg.d2; ```sql select d1.s1, d1.s2, d2.s3, d3.s4 - into ::(s1_1, s2_2), root.sg.d2_2(s3_3), root.${2}_copy.::(s4) - from root.sg; ``` @@ -913,47 +820,37 @@ select * into root.sg_bk.::(::) from root.sg.**; ###### 目标设备不使用变量占位符 & 目标物理量列表使用变量占位符 ```sql - select s1, s2, s3, s4 - into root.backup_sg.d1(s1, s2, s3, s4), root.backup_sg.d2(::), root.sg.d3(backup_${4}) - from root.sg.d1, root.sg.d2, root.sg.d3 - align by device; ``` ###### 目标设备使用变量占位符 & 目标物理量列表不使用变量占位符 ```sql - select avg(s1), sum(s2) + sum(s3), count(s4) - into root.agg_${2}.::(avg_s1, sum_s2_add_s3, count_s4) - from root.** - align by device; ``` ###### 目标设备使用变量占位符 & 目标物理量列表使用变量占位符 ```sql - select * into ::(backup_${4}) from root.sg.** align by device; ``` #### 指定目标序列为对齐序列 ```sql - select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from root.sg.d1, root.sg.d2 align by device; ``` ## 5. 运维语句 生成对应的查询计划 ```sql -explain select s1,s2 from root.sg.d1 +explain select s1,s2 from root.sg.d1; ``` 执行对应的查询语句,并获取分析结果 ```sql -explain analyze select s1,s2 from root.sg.d1 order by s1 +explain analyze select s1,s2 from root.sg.d1 order by s1; ``` ## 6. 运算符 @@ -964,7 +861,7 @@ explain analyze select s1,s2 from root.sg.d1 order by s1 更多见文档 [Arithmetic Operators and Functions](./Operator-and-Expression.md#_1-1-算数运算符) ```sql -select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1 +select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root.sg.d1; ``` ### 6.2 比较运算符 @@ -972,27 +869,27 @@ select s1, - s1, s2, + s2, s1 + s2, s1 - s2, s1 * s2, s1 / s2, s1 % s2 from root 更多见文档[Comparison Operators and Functions](./Operator-and-Expression.md#_1-2-比较运算符) ```sql -# Basic comparison operators +# Basic comparison operators; select a, b, a > 10, a <= b, !(a <= b), a > 10 && a > b from root.test; -# `BETWEEN ... AND ...` operator +# `BETWEEN ... 
AND ...` operator; select temperature from root.sg1.d1 where temperature between 36.5 and 40; select temperature from root.sg1.d1 where temperature not between 36.5 and 40; -# Fuzzy matching operator: Use `Like` for fuzzy matching -select * from root.sg.d1 where value like '%cc%' -select * from root.sg.device where value like '_b_' +# Fuzzy matching operator: Use `Like` for fuzzy matching; +select * from root.sg.d1 where value like '%cc%'; +select * from root.sg.device where value like '_b_'; -# Fuzzy matching operator: Use `Regexp` for fuzzy matching -select * from root.sg.d1 where value regexp '^[A-Za-z]+$' -select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100 +# Fuzzy matching operator: Use `Regexp` for fuzzy matching; +select * from root.sg.d1 where value regexp '^[A-Za-z]+$'; +select * from root.sg.d1 where value regexp '^[a-z]+$' and time > 100; select b, b like '1%', b regexp '[0-2]' from root.test; -# `IS NULL` operator +# `IS NULL` operator; select code from root.sg1.d1 where temperature is null; select code from root.sg1.d1 where temperature is not null; -# `IN` operator +# `IN` operator; select code from root.sg1.d1 where code in ('200', '300', '400', '500'); select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); select a, a in (1, 2) from root.test; @@ -1048,25 +945,25 @@ select ts, in_range(ts, 'lower'='2', 'upper'='3.1') from root.test; ```sql select s1, string_contains(s1, 's'='warn') from root.sg1.d4; select s1, string_matches(s1, 'regex'='[^\\s]+37229') from root.sg1.d4; -select s1, length(s1) from root.sg1.d1 -select s1, locate(s1, "target"="1") from root.sg1.d1 -select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1 -select s1, startswith(s1, "target"="1") from root.sg1.d1 -select s1, endswith(s1, "target"="1") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1 -select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", 
"series_behind"="true") from root.sg1.d1 -select s1, substring(s1 from 1 for 2) from root.sg1.d1 -select s1, replace(s1, 'es', 'tt') from root.sg1.d1 -select s1, upper(s1) from root.sg1.d1 -select s1, lower(s1) from root.sg1.d1 -select s3, trim(s3) from root.sg1.d1 -select s1, s2, strcmp(s1, s2) from root.sg1.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1 -select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1 -select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1 -select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1 -select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 +select s1, length(s1) from root.sg1.d1; +select s1, locate(s1, "target"="1") from root.sg1.d1; +select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1; +select s1, startswith(s1, "target"="1") from root.sg1.d1; +select s1, endswith(s1, "target"="1") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1; +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", "series_behind"="true") from root.sg1.d1; +select s1, substring(s1 from 1 for 2) from root.sg1.d1; +select s1, replace(s1, 'es', 'tt') from root.sg1.d1; +select s1, upper(s1) from root.sg1.d1; +select s1, lower(s1) from root.sg1.d1; +select s3, trim(s3) from root.sg1.d1; +select s1, s2, strcmp(s1, s2) from root.sg1.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1; +select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1; +select regexmatch(s1, "regex"="\d+\.\d+\.\d+\.\d+", "group"="0") from root.test.d1; +select regexreplace(s1, "regex"="192\.168\.0\.(\d+)", "replace"="cluster-$1", "limit"="1") from root.test.d1; +select 
regexsplit(s1, "regex"=",", "index"="-1") from root.test.d1; +select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1; ``` ### 7.5 数据类型转换函数 @@ -1074,7 +971,7 @@ select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 更多见文档[Data Type Conversion Function](./Operator-and-Expression.md#_2-5-数据类型转换函数) ```sql -SELECT cast(s1 as INT32) from root.sg +SELECT cast(s1 as INT32) from root.sg; ``` ### 7.6 常序列生成函数 @@ -1122,8 +1019,8 @@ select equal_size_bucket_agg_sample(temperature, 'type'='avg','proportion'='0.1' select equal_size_bucket_m4_sample(temperature, 'proportion'='0.1') as M4_sample from root.ln.wf01.wt01; select equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='avg', 'number'='2') as outlier_avg_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='stendis', 'number'='2') as outlier_stendis_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='cos', 'number'='2') as outlier_cos_sample, equal_size_bucket_outlier_sample(temperature, 'proportion'='0.1', 'type'='prenextdis', 'number'='2') as outlier_prenextdis_sample from root.ln.wf01.wt01; -select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1 -select M4(s1,'windowSize'='10') from root.vehicle.d1 +select M4(s1,'timeInterval'='25','displayWindowBegin'='0','displayWindowEnd'='100') from root.vehicle.d1; +select M4(s1,'windowSize'='10') from root.vehicle.d1; ``` ### 7.12 时间序列处理函数 @@ -1131,7 +1028,7 @@ select M4(s1,'windowSize'='10') from root.vehicle.d1 更多见文档[Time-Series](./Operator-and-Expression.md#_2-11-时间序列处理函数) ```sql -select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1 +select change_points(s1), change_points(s2), change_points(s3), change_points(s4), change_points(s5), change_points(s6) from root.testChangePoints.d1; ``` ## 8. 
数据质量函数库 @@ -1143,24 +1040,24 @@ select change_points(s1), change_points(s2), change_points(s3), change_points(s4 更多见文档[Data-Quality](../SQL-Manual/UDF-Libraries.md#数据质量) ```sql -# Completeness -select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Completeness; +select completeness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select completeness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Consistency -select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Consistency; +select consistency(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select consistency(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Timeliness -select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Timeliness; +select timeliness(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select timeliness(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Validity -select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 -select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00 +# Validity; +select Validity(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; +select Validity(s1,"window"="15") from root.test.d1 where time <= 2020-01-01 00:01:00; -# Accuracy -select Accuracy(t1,t2,t3,m1,m2,m3) from root.test +# Accuracy; +select Accuracy(t1,t2,t3,m1,m2,m3) from root.test; ``` ### 8.2 数据画像 @@ -1168,79 +1065,79 @@ select Accuracy(t1,t2,t3,m1,m2,m3) from root.test 更多见文档[Data-Profiling](../SQL-Manual/UDF-Libraries.md#数据画像) ```sql -# ACF -select acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05 +# ACF; +select 
acf(s1) from root.test.d1 where time <= 2020-01-01 00:00:05; -# Distinct -select distinct(s2) from root.test.d2 +# Distinct; +select distinct(s2) from root.test.d2; -# Histogram -select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1 +# Histogram; +select histogram(s1,"min"="1","max"="20","count"="10") from root.test.d1; -# Integral -select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 -select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10 +# Integral; +select integral(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; +select integral(s1, "unit"="1m") from root.test.d1 where time <= 2020-01-01 00:00:10; -# IntegralAvg -select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10 +# IntegralAvg; +select integralavg(s1) from root.test.d1 where time <= 2020-01-01 00:00:10; -# Mad -select mad(s0) from root.test -select mad(s0, "error"="0.01") from root.test +# Mad; +select mad(s0) from root.test; +select mad(s0, "error"="0.01") from root.test; -# Median -select median(s0, "error"="0.01") from root.test +# Median; +select median(s0, "error"="0.01") from root.test; -# MinMax -select minmax(s1) from root.test +# MinMax; +select minmax(s1) from root.test; -# Mode -select mode(s2) from root.test.d2 +# Mode; +select mode(s2) from root.test.d2; -# MvAvg -select mvavg(s1, "window"="3") from root.test +# MvAvg; +select mvavg(s1, "window"="3") from root.test; -# PACF -select pacf(s1, "lag"="5") from root.test +# PACF; +select pacf(s1, "lag"="5") from root.test; -# Percentile -select percentile(s0, "rank"="0.2", "error"="0.01") from root.test +# Percentile; +select percentile(s0, "rank"="0.2", "error"="0.01") from root.test; -# Quantile -select quantile(s0, "rank"="0.2", "K"="800") from root.test +# Quantile; +select quantile(s0, "rank"="0.2", "K"="800") from root.test; -# Period -select period(s1) from root.test.d3 +# Period; +select period(s1) from root.test.d3; -# QLB -select QLB(s1) from 
root.test.d1 +# QLB; +select QLB(s1) from root.test.d1; -# Resample -select resample(s1,'every'='5m','interp'='linear') from root.test.d1 -select resample(s1,'every'='30m','aggr'='first') from root.test.d1 -select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1 +# Resample; +select resample(s1,'every'='5m','interp'='linear') from root.test.d1; +select resample(s1,'every'='30m','aggr'='first') from root.test.d1; +select resample(s1,'every'='30m','start'='2021-03-06 15:00:00') from root.test.d1; -# Sample -select sample(s1,'method'='reservoir','k'='5') from root.test.d1 -select sample(s1,'method'='isometric','k'='5') from root.test.d1 +# Sample; +select sample(s1,'method'='reservoir','k'='5') from root.test.d1; +select sample(s1,'method'='isometric','k'='5') from root.test.d1; -# Segment -select segment(s1, "error"="0.1") from root.test +# Segment; +select segment(s1, "error"="0.1") from root.test; -# Skew -select skew(s1) from root.test.d1 +# Skew; +select skew(s1) from root.test.d1; -# Spline -select spline(s1, "points"="151") from root.test +# Spline; +select spline(s1, "points"="151") from root.test; -# Spread -select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30 +# Spread; +select spread(s1) from root.test.d1 where time <= 2020-01-01 00:00:30; -# Stddev -select stddev(s1) from root.test.d1 +# Stddev; +select stddev(s1) from root.test.d1; -# ZScore -select zscore(s1) from root.test +# ZScore; +select zscore(s1) from root.test; ``` ### 8.3 异常检测 @@ -1248,34 +1145,34 @@ select zscore(s1) from root.test 更多见文档[Anomaly-Detection](../SQL-Manual/UDF-Libraries.md#异常检测) ```sql -# IQR -select iqr(s1) from root.test +# IQR; +select iqr(s1) from root.test; -# KSigma -select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +# KSigma; +select ksigma(s1,"k"="1.0") from root.test.d1 where time <= 2020-01-01 00:00:30; -# LOF -select lof(s1,s2) from root.test.d1 where time<1000 -select lof(s1, "method"="series") 
from root.test.d1 where time<1000 +# LOF; +select lof(s1,s2) from root.test.d1 where time<1000; +select lof(s1, "method"="series") from root.test.d1 where time<1000; -# MissDetect -select missdetect(s2,'minlen'='10') from root.test.d2 +# MissDetect; +select missdetect(s2,'minlen'='10') from root.test.d2; -# Range -select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30 +# Range; +select range(s1,"lower_bound"="101.0","upper_bound"="125.0") from root.test.d1 where time <= 2020-01-01 00:00:30; -# TwoSidedFilter -select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test +# TwoSidedFilter; +select TwoSidedFilter(s0, 'len'='5', 'threshold'='0.3') from root.test; -# Outlier -select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test +# Outlier; +select outlier(s1,"r"="5.0","k"="4","w"="10","s"="5") from root.test; -# MasterTrain -select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test +# MasterTrain; +select MasterTrain(lo,la,m_lo,m_la,'p'='3','eta'='1.0') from root.test; -# MasterDetect -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test -select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test +# MasterDetect; +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='repair','p'='3','k'='3','eta'='1.0') from root.test; +select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3','eta'='1.0') from root.test; ``` ### 8.4 频域分析 @@ -1283,31 +1180,31 @@ select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3 更多见文档[Frequency-Domain](../SQL-Manual/UDF-Libraries.md#频域分析) ```sql -# Conv -select conv(s1,s2) from root.test.d2 +# Conv; +select conv(s1,s2) from root.test.d2; -# Deconv -select deconv(s3,s2) from root.test.d2 -select deconv(s3,s2,'result'='remainder') from root.test.d2 +# Deconv; +select deconv(s3,s2) from root.test.d2; 
+select deconv(s3,s2,'result'='remainder') from root.test.d2; -# DWT -select dwt(s1,"method"="haar") from root.test.d1 +# DWT; +select dwt(s1,"method"="haar") from root.test.d1; -# FFT -select fft(s1) from root.test.d1 -select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1 +# FFT; +select fft(s1) from root.test.d1; +select fft(s1, 'result'='real', 'compress'='0.99'), fft(s1, 'result'='imag','compress'='0.99') from root.test.d1; -# HighPass -select highpass(s1,'wpass'='0.45') from root.test.d1 +# HighPass; +select highpass(s1,'wpass'='0.45') from root.test.d1; -# IFFT -select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1 +# IFFT; +select ifft(re, im, 'interval'='1m', 'start'='2021-01-01 00:00:00') from root.test.d1; -# LowPass -select lowpass(s1,'wpass'='0.45') from root.test.d1 +# LowPass; +select lowpass(s1,'wpass'='0.45') from root.test.d1; -# Envelope -select envelope(s1) from root.test.d1 +# Envelope; +select envelope(s1) from root.test.d1; ``` ### 8.5 数据匹配 @@ -1315,20 +1212,20 @@ select envelope(s1) from root.test.d1 更多见文档[Data-Matching](../SQL-Manual/UDF-Libraries.md#数据匹配) ```sql -# Cov -select cov(s1,s2) from root.test.d2 +# Cov; +select cov(s1,s2) from root.test.d2; -# DTW -select dtw(s1,s2) from root.test.d2 +# DTW; +select dtw(s1,s2) from root.test.d2; -# Pearson -select pearson(s1,s2) from root.test.d2 +# Pearson; +select pearson(s1,s2) from root.test.d2; -# PtnSym -select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1 +# PtnSym; +select ptnsym(s4, 'window'='5', 'threshold'='0') from root.test.d1; -# XCorr -select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 +# XCorr; +select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05; ``` ### 8.6 数据修复 @@ -1336,24 +1233,24 @@ select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 更多见文档[Data-Repairing](../SQL-Manual/UDF-Libraries.md#数据修复) ```sql -# 
TimestampRepair -select timestamprepair(s1,'interval'='10000') from root.test.d2 -select timestamprepair(s1) from root.test.d2 +# TimestampRepair; +select timestamprepair(s1,'interval'='10000') from root.test.d2; +select timestamprepair(s1) from root.test.d2; -# ValueFill -select valuefill(s1) from root.test.d2 -select valuefill(s1,"method"="previous") from root.test.d2 +# ValueFill; +select valuefill(s1) from root.test.d2; +select valuefill(s1,"method"="previous") from root.test.d2; -# ValueRepair -select valuerepair(s1) from root.test.d2 -select valuerepair(s1,'method'='LsGreedy') from root.test.d2 +# ValueRepair; +select valuerepair(s1) from root.test.d2; +select valuerepair(s1,'method'='LsGreedy') from root.test.d2; -# MasterRepair -select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test +# MasterRepair; +select MasterRepair(t1,t2,t3,m1,m2,m3) from root.test; -# SeasonalRepair -select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2 -select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 +# SeasonalRepair; +select seasonalrepair(s1,'period'=3,'k'=2) from root.test.d2; +select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2; ``` ### 8.7 序列发现 @@ -1361,12 +1258,12 @@ select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 更多见文档[Series-Discovery](../SQL-Manual/UDF-Libraries.md#序列发现) ```sql -# ConsecutiveSequences -select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1 -select consecutivesequences(s1,s2) from root.test.d1 +# ConsecutiveSequences; +select consecutivesequences(s1,s2,'gap'='5m') from root.test.d1; +select consecutivesequences(s1,s2) from root.test.d1; -# ConsecutiveWindows -select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 +# ConsecutiveWindows; +select consecutivewindows(s1,s2,'length'='10m') from root.test.d1; ``` ### 8.8 机器学习 @@ -1374,14 +1271,14 @@ select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 
更多见文档[Machine-Learning](../SQL-Manual/UDF-Libraries.md#机器学习) ```sql -# AR -select ar(s0,"p"="2") from root.test.d0 +# AR; +select ar(s0,"p"="2") from root.test.d0; -# Representation -select representation(s0,"tb"="3","vb"="2") from root.test.d0 +# Representation; +select representation(s0,"tb"="3","vb"="2") from root.test.d0; -# RM -select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0 +# RM; +select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0; ``` ## 9. 条件表达式 @@ -1394,24 +1291,24 @@ when 1000=1050 then "bad temperature" when P<=1000000 or P>=1100000 then "bad pressure" end as `result` -from root.test1 +from root.test1; select str, case when str like "%cc%" then "has cc" when str like "%dd%" then "has dd" else "no cc and dd" end as `result` -from root.test2 +from root.test2; select count(case when x<=1 then 1 end) as `(-∞,1]`, count(case when 1 SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 配置连续查询的时间窗口大小 ```sql CREATE CONTINUOUS QUERY cq2 - RESAMPLE RANGE 40s - BEGIN - SELECT max_value(temperature) - INTO root.ln.wf02.wt02(temperature_max), root.ln.wf02.wt01(temperature_max), root.ln.wf01.wt02(temperature_max), root.ln.wf01.wt01(temperature_max) - FROM root.ln.*.* - GROUP BY(10s) +END; -END - - -\> SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 同时配置连续查询执行的周期性间隔和时间窗口大小 ```sql CREATE CONTINUOUS QUERY cq3 - RESAMPLE EVERY 20s RANGE 40s - BEGIN - SELECT max_value(temperature) - INTO root.ln.wf02.wt02(temperature_max), root.ln.wf02.wt01(temperature_max), root.ln.wf01.wt02(temperature_max), root.ln.wf01.wt01(temperature_max) - FROM root.ln.*.* - GROUP BY(10s) - FILL(100.0) +END; -END - - - -\> SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 配置连续查询每次查询执行时间窗口的结束时间 ```sql CREATE CONTINUOUS QUERY cq4 - RESAMPLE EVERY 20s RANGE 40s, 20s - BEGIN - SELECT max_value(temperature) - INTO root.ln.wf02.wt02(temperature_max), 
root.ln.wf02.wt01(temperature_max), root.ln.wf01.wt02(temperature_max), root.ln.wf01.wt01(temperature_max) - FROM root.ln.*.* - GROUP BY(10s) - FILL(100.0) +END; -END - - - -\> SELECT temperature_max from root.ln.*.*; +SELECT temperature_max from root.ln.*.*; ``` #### 没有GROUP BY TIME子句的连续查询 ```sql CREATE CONTINUOUS QUERY cq5 - RESAMPLE EVERY 20s - BEGIN - SELECT temperature + 1 - INTO root.precalculated_sg.::(temperature) - FROM root.ln.*.* - align by device +END; -END - - - -\> SELECT temperature from root.precalculated_sg.*.* align by device; +SELECT temperature from root.precalculated_sg.*.* align by device; ``` ### 11.2 连续查询的管理 @@ -1686,18 +1499,12 @@ DROP CONTINUOUS QUERY s1_count_cq; 1. 创建一个连续查询 ```sql CREATE CQ s1_count_cq - BEGIN - -​ SELECT count(s1) - -​ INTO root.sg_count.d.count_s1 - -​ FROM root.sg.d - -​ GROUP BY(30m) - -END + SELECT count(s1) + INTO root.sg_count.d.count_s1 + FROM root.sg.d + GROUP BY(30m) +END; ``` 1. 查询连续查询的结果 ```sql @@ -1717,11 +1524,11 @@ CREATE FUNCTION AS (USING URI URI-STRING)? 
#### 不指定URI ```sql -CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' +CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample'; ``` #### 指定URI ```sql -CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' USING URI 'http://jar/example.jar' +CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' USING URI 'http://jar/example.jar'; ``` ### 12.3 UDF 卸载 @@ -1729,7 +1536,7 @@ CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' USING URI 'http:// DROP FUNCTION ``` ```sql -DROP FUNCTION example +DROP FUNCTION example; ``` ### 12.4 UDF 查询 @@ -1743,16 +1550,13 @@ SELECT example(s1, s2, 'key1'='value1', 'key2'='value2') FROM root.sg.d1; #### 与其他查询的嵌套查询 ```sql SELECT s1, s2, example(s1, s2) FROM root.sg.d1; - SELECT *, example(*) FROM root.sg.d1 DISABLE ALIGN; - SELECT s1 * example(* / s1 + s2) FROM root.sg.d1; - SELECT s1, s2, s1 + example(s1, s2), s1 - example(s1 + example(s1, s2) / s2) FROM root.sg.d1; ``` ### 12.5 查看所有注册的 UDF ```sql -SHOW FUNCTIONS +SHOW FUNCTIONS; ``` ## 13. 
权限管理 @@ -1760,68 +1564,63 @@ SHOW FUNCTIONS - 创建用户(需 MANAGE_USER 权限) - ```SQL -CREATE USER -eg: CREATE USER user1 'passwd' +CREATE USER ; +eg: CREATE USER user1 'passwd'; ``` - 删除用户 (需 MANEGE_USER 权限) - ```SQL -DROP USER -eg: DROP USER user1 +DROP USER ; +eg: DROP USER user1; ``` - 创建角色 (需 MANAGE_ROLE 权限) ```SQL -CREATE ROLE -eg: CREATE ROLE role1 +CREATE ROLE ; +eg: CREATE ROLE role1; ``` - 删除角色 (需 MANAGE_ROLE 权限) - ```SQL -DROP ROLE -eg: DROP ROLE role1 +DROP ROLE ; +eg: DROP ROLE role1; ``` - 赋予用户角色 (需 MANAGE_ROLE 权限) - ```SQL -GRANT ROLE TO -eg: GRANT ROLE admin TO user1 +GRANT ROLE TO ; +eg: GRANT ROLE admin TO user1; ``` - 移除用户角色 (需 MANAGE_ROLE 权限) - ```SQL -REVOKE ROLE FROM -eg: REVOKE ROLE admin FROM user1 +REVOKE ROLE FROM ; +eg: REVOKE ROLE admin FROM user1; ``` - 列出所有用户 (需 MANEGE_USER 权限) ```SQL -LIST USER +LIST USER; ``` - 列出所有角色 (需 MANAGE_ROLE 权限) ```SQL -LIST ROLE +LIST ROLE; ``` - 列出指定角色下所有用户 (需 MANEGE_USER 权限) ```SQL -LIST USER OF ROLE -eg: LIST USER OF ROLE roleuser +LIST USER OF ROLE ; +eg: LIST USER OF ROLE roleuser; ``` - 列出指定用户下所有角色 @@ -1829,8 +1628,8 @@ eg: LIST USER OF ROLE roleuser 用户可以列出自己的角色,但列出其他用户的角色需要拥有 MANAGE_ROLE 权限。 ```SQL -LIST ROLE OF USER -eg: LIST ROLE OF USER tempuser +LIST ROLE OF USER ; +eg: LIST ROLE OF USER tempuser; ``` - 列出用户所有权限