PostgreSQL – Advanced Open Source Relational Database
Complete Guide to PostgreSQL: Enterprise-Grade Database Management
PostgreSQL stands as the world’s most advanced open-source relational database, combining reliability, feature completeness, and extensibility that rivals commercial database systems. Used by organizations ranging from startups to Fortune 500 companies, PostgreSQL handles everything from simple web applications to complex data warehousing and geospatial analysis. Its commitment to SQL standards compliance, ACID transactions, and data integrity has earned it the trust of the most demanding applications.
Beyond traditional relational features, PostgreSQL supports JSON data, full-text search, geographic objects, and custom data types, making it suitable for diverse use cases that would otherwise require multiple specialized databases.
Installation
# Ubuntu/Debian
sudo apt update
sudo apt install postgresql postgresql-contrib
# Fedora
sudo dnf install postgresql-server postgresql-contrib
sudo postgresql-setup --initdb
sudo systemctl start postgresql
# Arch Linux
sudo pacman -S postgresql
sudo -u postgres initdb -D /var/lib/postgres/data
sudo systemctl start postgresql
# macOS
brew install postgresql@16
brew services start postgresql@16
# Windows
# Download from postgresql.org
# Run installer
# Docker
docker run -d \
--name postgres \
-e POSTGRES_PASSWORD=mysecretpassword \
-p 5432:5432 \
-v postgres_data:/var/lib/postgresql/data \
postgres:16
# Verify installation
psql --version
sudo -u postgres psql -c "SELECT version();"
Basic Usage
# Connect to PostgreSQL
sudo -u postgres psql
psql -h localhost -U username -d database
# Connection string
psql "postgresql://user:password@localhost:5432/dbname"
# Basic commands
\l List databases
\c dbname Connect to database
\dt List tables
\d table Describe table
\du List users
\q Quit
# Create database
CREATE DATABASE mydb;
# Create user
CREATE USER myuser WITH PASSWORD 'password';
GRANT ALL PRIVILEGES ON DATABASE mydb TO myuser;
# Create table
CREATE TABLE users (
id SERIAL PRIMARY KEY,
username VARCHAR(50) UNIQUE NOT NULL,
email VARCHAR(100) NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
# Insert data
INSERT INTO users (username, email)
VALUES ('john', 'john@example.com');
# Query data
SELECT * FROM users;
SELECT username, email FROM users WHERE id = 1;
# Update data
UPDATE users SET email = 'newemail@example.com' WHERE id = 1;
# Delete data
DELETE FROM users WHERE id = 1;
Data Types
# Numeric types
SMALLINT 2 bytes (-32768 to 32767)
INTEGER 4 bytes
BIGINT 8 bytes
DECIMAL(p,s) Variable precision
NUMERIC(p,s) Variable precision
REAL 4 bytes floating point
DOUBLE PRECISION 8 bytes floating point
SERIAL Auto-incrementing integer (legacy shorthand; PostgreSQL 10+ recommends GENERATED ALWAYS AS IDENTITY)
# Character types
CHAR(n) Fixed length
VARCHAR(n) Variable length
TEXT Unlimited length
# Date/Time types
DATE Date only
TIME Time only
TIMESTAMP Date and time
TIMESTAMPTZ With timezone
INTERVAL Time interval
# Boolean
BOOLEAN true/false
# UUID
UUID Universally unique identifier
# JSON types
JSON Text JSON
JSONB Binary JSON (recommended)
# Array types
INTEGER[] Array of integers
TEXT[] Array of text
# Other types
BYTEA Binary data
INET IP address
CIDR Network address
MACADDR MAC address
MONEY Currency (locale-dependent formatting; NUMERIC is generally preferred for monetary values)
Indexes
# Create index
CREATE INDEX idx_users_email ON users(email);
# Unique index
CREATE UNIQUE INDEX idx_users_username ON users(username);
# Partial index
CREATE INDEX idx_active_users ON users(email)
WHERE active = true;
# Multi-column index
CREATE INDEX idx_name ON users(first_name, last_name);
# GIN index (for JSON, arrays)
CREATE INDEX idx_data ON documents USING GIN (data);
# GiST index (for geometric, full-text)
CREATE INDEX idx_location ON places USING GIST (location);
# Show indexes
\di
SELECT * FROM pg_indexes WHERE tablename = 'users';
# Drop index
DROP INDEX idx_users_email;
# Analyze index usage
EXPLAIN ANALYZE SELECT * FROM users WHERE email = 'test@example.com';
JSON Support
# Create table with JSON
CREATE TABLE products (
id SERIAL PRIMARY KEY,
name VARCHAR(100),
attributes JSONB
);
# Insert JSON data
INSERT INTO products (name, attributes)
VALUES (
'Laptop',
'{"brand": "Dell", "specs": {"ram": 16, "storage": 512}}'
);
# Query JSON
SELECT attributes->>'brand' FROM products;
SELECT attributes->'specs'->>'ram' FROM products;
# Filter by JSON
SELECT * FROM products
WHERE attributes->>'brand' = 'Dell';
SELECT * FROM products
WHERE attributes @> '{"brand": "Dell"}';
# Update JSON
UPDATE products
SET attributes = jsonb_set(attributes, '{specs,ram}', '32')
WHERE id = 1;
# JSON functions
jsonb_pretty(attributes)
jsonb_array_elements(array_column)
jsonb_object_keys(attributes)
# GIN index for JSON
CREATE INDEX idx_attributes ON products USING GIN (attributes);
Full-Text Search
# Create text search column
ALTER TABLE articles ADD COLUMN search_vector tsvector;
# Update search vector
# (coalesce() guards against NULL columns: NULL || text yields NULL,
#  which would silently leave rows unsearchable)
UPDATE articles SET search_vector =
to_tsvector('english', coalesce(title, '') || ' ' || coalesce(content, ''));
# Create GIN index
CREATE INDEX idx_search ON articles USING GIN (search_vector);
# Search query
SELECT title FROM articles
WHERE search_vector @@ to_tsquery('english', 'database & postgresql');
# Ranking results
SELECT title, ts_rank(search_vector, query) AS rank
FROM articles, to_tsquery('english', 'database') query
WHERE search_vector @@ query
ORDER BY rank DESC;
# Phrase search
SELECT * FROM articles
WHERE search_vector @@ phraseto_tsquery('english', 'open source database');
# Automatic update (trigger)
CREATE FUNCTION update_search_vector() RETURNS trigger AS $$
BEGIN
NEW.search_vector := to_tsvector('english', NEW.title || ' ' || NEW.content);
RETURN NEW;
END
$$ LANGUAGE plpgsql;
CREATE TRIGGER search_vector_update
BEFORE INSERT OR UPDATE ON articles
FOR EACH ROW EXECUTE FUNCTION update_search_vector();
Transactions
# Begin transaction
BEGIN;
# Or explicitly
START TRANSACTION;
# Perform operations
INSERT INTO accounts (name, balance) VALUES ('Alice', 1000);
UPDATE accounts SET balance = balance - 100 WHERE name = 'Alice';
UPDATE accounts SET balance = balance + 100 WHERE name = 'Bob';
# Commit
COMMIT;
# Rollback on error
ROLLBACK;
# Savepoints
BEGIN;
INSERT INTO orders (product_id, quantity) VALUES (1, 10);
SAVEPOINT order_created;
UPDATE inventory SET stock = stock - 10 WHERE product_id = 1;
-- If this fails
ROLLBACK TO order_created;
COMMIT;
# Transaction isolation levels
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
Backup and Restore
# pg_dump (logical backup)
pg_dump dbname > backup.sql
pg_dump -Fc dbname > backup.dump # Custom format
pg_dump -Ft dbname > backup.tar # Tar format
# Restore from dump
psql dbname < backup.sql
pg_restore -d dbname backup.dump
# Backup specific table
pg_dump -t tablename dbname > table_backup.sql
# Backup schema only
pg_dump --schema-only dbname > schema.sql
# Backup data only
pg_dump --data-only dbname > data.sql
# pg_dumpall (all databases)
pg_dumpall > all_databases.sql
# Restore all
psql -f all_databases.sql postgres
# Continuous archiving (WAL)
# In postgresql.conf:
archive_mode = on
archive_command = 'cp %p /archive/%f'
# Point-in-time recovery
pg_basebackup -D /backup/base -Fp -Xs -P
Performance Tuning
# Configuration (postgresql.conf)
# Memory
shared_buffers = 256MB # 25% of RAM
effective_cache_size = 768MB # 75% of RAM
work_mem = 16MB # Per operation
maintenance_work_mem = 256MB # For maintenance
# Connections
max_connections = 100
# WAL
wal_buffers = 16MB
checkpoint_completion_target = 0.9
# Query planning
random_page_cost = 1.1 # SSD
effective_io_concurrency = 200 # SSD
# EXPLAIN ANALYZE
EXPLAIN ANALYZE SELECT * FROM users WHERE email = 'test@example.com';
# Vacuum
VACUUM ANALYZE tablename;
VACUUM FULL tablename; # Reclaim space (rewrites the table and holds an exclusive lock — avoid on busy production tables)
# Statistics
SELECT * FROM pg_stat_user_tables;
SELECT * FROM pg_stat_user_indexes;
# Slow query log
log_min_duration_statement = 1000 # Log queries > 1s
# Connection pooling
# Use PgBouncer or pgpool-II
Administration
# User management
CREATE USER newuser WITH PASSWORD 'password';
ALTER USER newuser WITH SUPERUSER;
DROP USER olduser;
# Role management
CREATE ROLE readonly;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO readonly;
-- Covers existing tables only; use ALTER DEFAULT PRIVILEGES to cover tables created later
GRANT readonly TO newuser;
# Database maintenance
VACUUM ANALYZE;
REINDEX DATABASE dbname;
# Monitor connections
SELECT * FROM pg_stat_activity;
# Kill connection
SELECT pg_terminate_backend(pid);
# Table sizes
SELECT relname, pg_size_pretty(pg_relation_size(relid))
FROM pg_stat_user_tables;
# Database size
SELECT pg_size_pretty(pg_database_size('dbname'));
# Logs
# Location varies by installation
tail -f /var/log/postgresql/postgresql-16-main.log
Conclusion
PostgreSQL’s combination of reliability, features, and flexibility has made it the database of choice for organizations demanding enterprise-grade performance without licensing costs. Its extensibility, standards compliance, and active community ensure continued evolution while maintaining the stability required for critical applications. Whether building new applications or replacing commercial databases, PostgreSQL delivers the capabilities needed for modern data management.
Download Options
Download PostgreSQL – Advanced Open Source Relational Database
Version 16.2
File Size: 50 MB
Download Now
Safe & Secure
Verified and scanned for viruses
Regular Updates
Always get the latest version
24/7 Support
Help available when you need it