--------------------------------------------------------------------
-- PostgreSQL: build a 200M-row test table and export it to CSV   --
--------------------------------------------------------------------
CREATE TABLE t (
    id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    i  INTEGER,
    c  VARCHAR(30),
    ts TIMESTAMP
);

-- 200M rows of random data; ts is spread uniformly over 2000-01-01 .. 2025-12-31.
INSERT INTO t (i, c, ts)
SELECT
    (random() * 9999 + 1)::int AS i,
    md5(random()::text)::varchar(30) AS c,
    (
        timestamp '2000-01-01'
        + random() * (timestamp '2025-12-31' - timestamp '2000-01-01')
    ) AS ts
FROM generate_series(1, 200000000);

-- export standard table to CSV
COPY t TO '/mnt/unprotected/tmp/postgres/t.csv' DELIMITER ',' CSV HEADER;

-- import standard table from CSV
-- FIX: t already exists (created above) — the second CREATE TABLE would fail.
DROP TABLE IF EXISTS t;
CREATE TABLE t (
    id INTEGER,
    i  INTEGER,
    c  TEXT,
    ts TIMESTAMPTZ  -- exported column was TIMESTAMP; values are read as local time — confirm tz intent
);
COPY t FROM '/mnt/unprotected/tmp/postgres/t.csv' DELIMITER ',' CSV HEADER;

CREATE INDEX IF NOT EXISTS t_ts_idx ON t (ts);

------------
-- Oracle --
------------
CREATE TABLE t (
    id INTEGER,
    i  INTEGER,
    c  VARCHAR2(30),
    ts TIMESTAMP
);

-- file t.ctl (SQL*Loader control file)
-- FIX: APPEND must come before INTO TABLE; SKIP=1 skips the CSV header row
-- (without it the header line lands in t.bad on every run).
--
--   OPTIONS (SKIP=1)
--   LOAD DATA
--   INFILE 't.csv'
--   APPEND
--   INTO TABLE t
--   FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
--   TRAILING NULLCOLS
--   (
--       id INTEGER EXTERNAL,
--       i  INTEGER EXTERNAL,
--       c  CHAR(30),
--       ts TIMESTAMP "YYYY-MM-DD HH24:MI:SS.FF"
--   )
--
-- shell invocation:
--   sqlldr "'/ as sysdba'" \
--       control=t.ctl \
--       log=t.log \
--       bad=t.bad \
--       rows=50000

------------------
-- TimescaleDB  --
------------------
-- Install & configure from sources:
-- https://www.tigerdata.com/docs/self-hosted/latest/install/installation-source

CREATE TABLE ht (
    id INTEGER,
    i  INTEGER,
    c  TEXT,
    ts TIMESTAMPTZ
);

SELECT create_hypertable(
    'ht',                                      -- table name
    'ts',                                      -- time column
    chunk_time_interval => INTERVAL '1 month'
);

-- Drop chunks older than 25 years automatically.
SELECT add_retention_policy('ht', INTERVAL '25 years');

-- Inspect the retention job...
SELECT *
FROM timescaledb_information.jobs
WHERE proc_name = 'policy_retention';

-- ...and reschedule it.
-- FIX: the original call had an empty "job_id => ," which is a syntax error;
-- resolve the id with a scalar subquery instead of hard-coding it.
SELECT alter_job(
    job_id => (
        SELECT job_id
        FROM timescaledb_information.jobs
        WHERE proc_name = 'policy_retention'
          AND hypertable_name = 'ht'
    ),
    schedule_interval => INTERVAL '6 hours'
);

-- Parallel CSV load (shell):
--   timescaledb-parallel-copy \
--       --connection "postgres://postgres@localhost/db01" \
--       --table ht \
--       --file '/mnt/unprotected/tmp/postgres/t.csv' \
--       --workers 16 --reporting-period 30s --skip-header

-- FIX: chunks belong to the hypertable ht — 't' is a plain table and
-- show_chunks('t') errors out.
SELECT show_chunks('ht');

-----------
-- Bench --
-----------
-- q1
-- FIX: '2015-04-01:09:00:00' (colon between date and time) is not a valid
-- timestamp literal. Also use a half-open range instead of BETWEEN so the
-- upper boundary instant is not double-counted across adjacent windows.
SELECT *
FROM t
WHERE ts >= TIMESTAMP '2015-04-01 09:00:00'
  AND ts <  TIMESTAMP '2015-04-01 09:00:20';

-- q2
SELECT count(*) FROM t;

-- Results ---------------------------------------------------------
-- Classic PostgreSQL table
--     load: 5 min
--     q1:   52 sec
--     q2:   45 sec
-- TimescaleDB table
--     load: 5 min
--
-- db01=# SELECT pg_size_pretty(pg_total_relation_size('public.t'));
--  pg_size_pretty
-- ----------------
--  18 GB
-- (1 row)
--
-- db01=# SELECT pg_size_pretty(hypertable_size('public.ht'));
--  pg_size_pretty
-- ----------------
--  19 GB
-- (1 row)
--------------------------------------------------------------------

-- Enable native compression on the hypertable.
ALTER TABLE ht SET (timescaledb.compress);

-- Compress chunks older than 2 years.
SELECT add_compression_policy('ht', INTERVAL '2 years');

-- Look up the compression job id (do not assume it)...
SELECT job_id
FROM timescaledb_information.jobs
WHERE proc_name = 'policy_compression'
  AND hypertable_name = 'ht';

-- ...then run it immediately (1002 was the id returned by the query above
-- in this session — re-check before reuse).
CALL run_job(1002);

-- Inspect per-chunk compression state.
SELECT
    chunk_schema || '.' || chunk_name AS chunk,
    is_compressed,
    range_start,
    range_end
FROM timescaledb_information.chunks
WHERE hypertable_name = 'ht'
ORDER BY range_start;

-----------------------------------------
-- Continuous aggregate: hourly AVG(i) --
-----------------------------------------
CREATE MATERIALIZED VIEW ht_hourly_avg
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('1 hour', ts) AS bucket,
    AVG(i) AS avg_i
FROM ht
GROUP BY bucket;

SELECT add_continuous_aggregate_policy('ht_hourly_avg',
    start_offset      => INTERVAL '2 days',
    end_offset        => INTERVAL '0 hours',
    schedule_interval => INTERVAL '5 minutes'
);

-- FIX: a continuous aggregate holds at most one refresh policy; calling
-- add_continuous_aggregate_policy a second time errors out. Remove the old
-- policy before installing the wider 7-day / 30-minute schedule.
SELECT remove_continuous_aggregate_policy('ht_hourly_avg');
SELECT add_continuous_aggregate_policy('ht_hourly_avg',
    start_offset      => INTERVAL '7 days',
    end_offset        => INTERVAL '0 hours',
    schedule_interval => INTERVAL '30 minutes'
);

SELECT *
FROM ht_hourly_avg
WHERE bucket >= now() - INTERVAL '7 days'
ORDER BY bucket;

SELECT job_id, proc_name, config
FROM timescaledb_information.jobs;

-- FIX: the original predicate ended with LIKE '%' || || '%' — a missing
-- operand and therefore a syntax error. Filter on the job call directly.
SELECT pid, query, state, backend_type
FROM pg_stat_activity
WHERE query LIKE '%run_job%';