PostgreSQL: Difference between revisions
Jump to navigation
Jump to search
(115 intermediate revisions by the same user not shown) | |||
Line 1: | Line 1: | ||
== Login to PSQL== | == Login to PSQL== | ||
< | <syntaxhighlight lang="bash"> | ||
export PGPASSWORD='p@55w0rd' | export PGPASSWORD='p@55w0rd' | ||
Line 18: | Line 18: | ||
# | # | ||
# postgres=# | # postgres=# | ||
</ | </syntaxhighlight> | ||
===Connection String=== | |||
---- | |||
{| | |||
|valign='top'| | |||
'''parameters based psql''' | |||
psql -U postgres | |||
psql -U academia postgres | |||
psql -U academia -h rds.vpc.chorke.org postgres | |||
psql -U academia -h rds.vpc.chorke.org -p 5432 postgres | |||
PGPASSWORD='sadaqah' psql -U academia -h rds.vpc.chorke.org -p 5432 postgres | |||
|valign='top'| | |||
'''connection string based psql:''' | |||
psql 'postgres://postgres:@:/' | |||
psql 'postgres://academia:@:/postgres' | |||
psql 'postgres://academia:@rds.vpc.chorke.org:/postgres' | |||
psql 'postgres://academia:@rds.vpc.chorke.org:5432/postgres' | |||
psql 'postgres://academia:[email protected]:5432/postgres' | |||
|} | |||
== Login Error == | == Login Error == | ||
{| | {| | ||
|valign='top'| | |valign='top'| | ||
< | <syntaxhighlight lang="bash"> | ||
# [email protected]:~ # | # [email protected]:~ # | ||
su - postgres psql | su - postgres psql | ||
# Last login: Tue Jan 01 12:00:00 MYT 2013 on pts/0 | # Last login: Tue Jan 01 12:00:00 MYT 2013 on pts/0 | ||
# /bin/createuser: /bin/createuser: cannot execute binary file | # /bin/createuser: /bin/createuser: cannot execute binary file | ||
</ | </syntaxhighlight> | ||
|valign='top'| | |valign='top'| | ||
< | <syntaxhighlight lang="bash"> | ||
# [email protected]:~ # | # [email protected]:~ # | ||
su postgres | su postgres | ||
psql -p 5433 | psql -p 5433 | ||
# psql (12.9 (Ubuntu 12.9-0ubuntu0.20.04.1)) | # psql (12.9 (Ubuntu 12.9-0ubuntu0.20.04.1)) | ||
</ | </syntaxhighlight> | ||
|valign='top'| | |valign='top'| | ||
< | <syntaxhighlight lang="bash"> | ||
# [email protected]:~ # | # [email protected]:~ # | ||
sudo su postgres | sudo su postgres | ||
psql -p 5433 | psql -p 5433 | ||
# psql (12.9 (Ubuntu 12.9-0ubuntu0.20.04.1)) | # psql (12.9 (Ubuntu 12.9-0ubuntu0.20.04.1)) | ||
</ | </syntaxhighlight> | ||
|} | |||
== Cluster Login == | |||
{| | |||
|valign='top'| | |||
<syntaxhighlight lang="bash"> | |||
pg_lsclusters | |||
sudo su - postgres | |||
</syntaxhighlight> | |||
|valign='top'| | |||
<syntaxhighlight lang="bash"> | |||
psql -p 5433 | |||
psql -U postgres -p 5433 | |||
</syntaxhighlight> | |||
|valign='top'| | |||
<syntaxhighlight lang="sql"> | |||
GRANT ALL ON tmp_coll TO academia; | |||
GRANT ALL ON test_json TO academia; | |||
</syntaxhighlight> | |||
|valign='top'| | |||
<syntaxhighlight lang="sql"> | |||
-- psql -V | |||
SELECT version(); | |||
</syntaxhighlight> | |||
|} | |} | ||
Line 81: | Line 129: | ||
# GRANT | # GRANT | ||
</syntaxhighlight> | </syntaxhighlight> | ||
==Extensions== | |||
{| | |||
| valign="top" | | |||
<syntaxhighlight lang="bash"> | |||
ENABLE_DB_LINK=$(cat <<-DDL | |||
CREATE EXTENSION IF NOT EXISTS dblink; | |||
CREATE EXTENSION IF NOT EXISTS dblink SCHEMA extensions; | |||
SELECT * FROM pg_available_extensions WHERE name = 'dblink'; | |||
DDL | |||
) | |||
echo "${ENABLE_DB_LINK}" | psql -p 5432 -U ${USER} | |||
echo "${ENABLE_DB_LINK}" | psql -p 5432 -U ${USER} academia_data_staging | |||
</syntaxhighlight> | |||
| valign="top" | | |||
<syntaxhighlight lang="bash"> | |||
ENABLE_PG_CRYPTO=$(cat <<-DDL | |||
CREATE EXTENSION IF NOT EXISTS pgcrypto; | |||
CREATE EXTENSION IF NOT EXISTS pgcrypto SCHEMA extensions; | |||
SELECT * FROM pg_available_extensions WHERE name = 'pgcrypto'; | |||
DDL | |||
) | |||
echo "${ENABLE_PG_CRYPTO}" | psql -p 5432 -U ${USER} | |||
echo "${ENABLE_PG_CRYPTO}" | psql -p 5432 -U ${USER} academia_data_staging | |||
</syntaxhighlight> | |||
|} | |||
== Logout PSQL == | == Logout PSQL == | ||
< | <syntaxhighlight lang="bash"> | ||
# postgres=# | # postgres=# | ||
\q | \q | ||
# -bash-4.2$ | # -bash-4.2$ | ||
</ | </syntaxhighlight> | ||
==Meta Data== | ==Meta Data== | ||
< | {| | ||
|valign="top"| | |||
<syntaxhighlight lang="sql"> | |||
-- show databases | -- show databases | ||
SELECT datname FROM pg_database; | SELECT datname FROM pg_database; -- \l+ | ||
-- show sequence | |||
SELECT c.relname FROM pg_class c -- \ds+ | |||
WHERE c.relkind = 'S' ORDER BY c.relname; | |||
-- show tables | -- show tables | ||
Line 99: | Line 180: | ||
-- describe tables | -- describe tables | ||
SELECT table_name, column_name, data_type | SELECT table_name, column_name, data_type -- \d+ my_table | ||
FROM information_schema.columns WHERE table_name = 'my_table'; | FROM information_schema.columns WHERE table_name = 'my_table'; | ||
Line 105: | Line 186: | ||
SELECT table_name, column_name, data_type | SELECT table_name, column_name, data_type | ||
FROM information_schema.columns WHERE column_name LIKE '%slug%'; | FROM information_schema.columns WHERE column_name LIKE '%slug%'; | ||
</ | </syntaxhighlight> | ||
|valign="top"| | |||
<syntaxhighlight lang="sql"> | |||
SELECT pg_database_size('my_database'); -- \l+ | |||
SELECT pg_size_pretty(pg_database_size('my_database')); -- \l+ | |||
SELECT | |||
db.datname AS db_name, | |||
pg_size_pretty(pg_database_size(db.datname)) AS db_size | |||
FROM pg_database db ORDER BY pg_database_size(db.datname) DESC; | |||
SELECT pg_size_pretty(pg_total_relation_size('my_table')); -- \dt+ | |||
SELECT -- \dt+ | |||
table_schema, table_name, | |||
pg_size_pretty(pg_total_relation_size(quote_ident(table_name))) AS table_size | |||
FROM information_schema.tables | |||
WHERE table_schema = 'public' | |||
ORDER BY 2 ASC; | |||
</syntaxhighlight> | |||
|} | |||
==XML Query== | |||
{| | |||
| valign="top" | | |||
<syntaxhighlight lang="sql"> | |||
SELECT | |||
xml_is_well_formed('<>') AS "<>", -- false | |||
xml_is_well_formed('<name >') AS "<name >", -- false | |||
xml_is_well_formed('<name />') AS "<name />", -- true | |||
xml_is_well_formed('academia') AS "academia", -- true | |||
xml_is_well_formed('{academia') AS "{academia", -- true | |||
xml_is_well_formed('{academia}') AS "{academia}"; -- true | |||
</syntaxhighlight> | |||
| valign="top" | | |||
<syntaxhighlight lang="sql"> | |||
SELECT | |||
xml_is_well_formed_content('<>') AS "<>", -- false | |||
xml_is_well_formed_content('<name >') AS "<name >", -- false | |||
xml_is_well_formed_content('<name />') AS "<name />", -- true | |||
xml_is_well_formed_content('academia') AS "academia", -- true | |||
xml_is_well_formed_content('{academia') AS "{academia", -- true | |||
xml_is_well_formed_content('{academia}') AS "{academia}"; -- true | |||
</syntaxhighlight> | |||
|- | |||
| valign="top" colspan="2"| | |||
---- | |||
|- | |||
| valign="top" | | |||
<syntaxhighlight lang="sql"> | |||
SELECT | |||
xml_is_well_formed_document('<>') AS "<>", -- false | |||
xml_is_well_formed_document('<name >') AS "<name >", -- false | |||
xml_is_well_formed_document('<name />') AS "<name />", -- true | |||
xml_is_well_formed_document('academia') AS "academia", -- false | |||
xml_is_well_formed_document('{academia') AS "{academia", -- false | |||
xml_is_well_formed_document('{academia}') AS "{academia}"; -- false | |||
</syntaxhighlight> | |||
| valign="top" | | |||
|- | |||
| valign="top" colspan="2"| | |||
---- | |||
|- | |||
| valign="top" | | |||
<syntaxhighlight lang="sql"> | |||
WITH xpath_node AS ( | |||
SELECT | |||
cast(raw_xpath_node->>0 AS integer) "sn", | |||
cast(raw_xpath_node->>1 AS varchar) "node" | |||
FROM jsonb_array_elements('[ | |||
[0, ""], | |||
[1, "bpmn:definitions"], | |||
[2, "bpmn:process"], | |||
[3, "bpmn:serviceTask"], | |||
[4, "bpmn:extensionElements"], | |||
[5, "camunda:connector"], | |||
[6, "camunda:inputOutput"], | |||
[7, "camunda:inputParameter"] | |||
]'::jsonb) AS raw_xpath_node | |||
) | |||
SELECT string_agg(node, '/') AS "path" FROM xpath_node; | |||
</syntaxhighlight> | |||
| valign="top" | | |||
<syntaxhighlight lang="sql"> | |||
WITH workflow_namespace AS ( | |||
SELECT | |||
cast(raw_workflow_namespace->>0 AS varchar) "prefix", | |||
cast(raw_workflow_namespace->>1 AS varchar) "namespace" | |||
FROM jsonb_array_elements('[ | |||
["bpmn", "http://www.omg.org/spec/BPMN/20100524/MODEL"], | |||
["bpmndi", "http://www.omg.org/spec/BPMN/20100524/DI"], | |||
["dc", "http://www.omg.org/spec/DD/20100524/DC"], | |||
["di", "http://www.omg.org/spec/DD/20100524/DI"], | |||
["modeler", "http://camunda.org/schema/modeler/1.0"], | |||
["camunda", "http://camunda.org/schema/1.0/bpmn"] | |||
]'::jsonb) AS raw_workflow_namespace | |||
) | |||
-- SELECT * FROM workflow_namespace; | |||
-- SELECT array[prefix, namespace] FROM workflow_namespace; | |||
SELECT array_agg(array[prefix, namespace]) FROM workflow_namespace; | |||
</syntaxhighlight> | |||
|- | |||
| valign="top" colspan="2"| | |||
---- | |||
|- | |||
| valign="top" colspan="2"| | |||
<syntaxhighlight lang="sql"> | |||
WITH xpath_ns AS ( | |||
SELECT | |||
cast(raw_xpath_ns->>0 AS varchar) "alias", | |||
cast(raw_xpath_ns->>1 AS varchar) "namespace" | |||
FROM jsonb_array_elements('[ | |||
["bpmn", "http://www.omg.org/spec/BPMN/20100524/MODEL"], | |||
["bpmndi", "http://www.omg.org/spec/BPMN/20100524/DI"], | |||
["dc", "http://www.omg.org/spec/DD/20100524/DC"], | |||
["di", "http://www.omg.org/spec/DD/20100524/DI"], | |||
["modeler", "http://camunda.org/schema/modeler/1.0"], | |||
["camunda", "http://camunda.org/schema/1.0/bpmn"] | |||
]'::jsonb) AS raw_xpath_ns | |||
), | |||
xpath_nss AS ( | |||
SELECT | |||
array_agg(array[ns.alias, ns.namespace]) | |||
FROM xpath_ns ns | |||
), | |||
stored_dataflow AS ( | |||
SELECT | |||
id_ AS "id", rev_ AS "revision", | |||
generated_ AS "is_generated", tenant_id_ AS "tenant_id", | |||
type_ AS "type", create_time_ AS "created_on", root_proc_inst_id_ AS "root_proc_inst_id", | |||
removal_time_ AS "deleted_on", name_ AS "name", convert_from(bytes_, 'UTF8') AS "dataflow" | |||
FROM act_ge_bytearray | |||
), | |||
stored_workflow AS ( | |||
SELECT | |||
sd.id, sd.name, sd.type, sd.revision, sd.is_generated, | |||
sd.created_on, sd.deleted_on, sd.root_proc_inst_id, | |||
CAST(sd.dataflow AS xml) AS "workflow", sd.dataflow, | |||
(SELECT * FROM xpath_nss) AS "ns" | |||
FROM stored_dataflow sd | |||
WHERE xml_is_well_formed_document(sd.dataflow) | |||
ORDER BY sd.name ASC, sd.created_on DESC | |||
) | |||
SELECT | |||
sw.name, | |||
(xpath('count(/bpmn:definitions/bpmn:process)', sw.workflow, sw.ns))[1]::text::int, | |||
(xpath('/bpmn:definitions/bpmn:process', sw.workflow, sw.ns))[1], dataflow | |||
FROM stored_workflow sw; | |||
</syntaxhighlight> | |||
|} | |||
==Array Query== | ==Array Query== | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT dependencies||'"Running Fiscal Year"' | SELECT dependencies||'"Running Fiscal Year"' | ||
FROM scope WHERE name = 'Fiscal Year Config'; | FROM scope WHERE name = 'Fiscal Year Config'; | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT * FROM user | SELECT * FROM user | ||
WHERE owner_ref LIKE ANY (ARRAY[ | WHERE owner_ref LIKE ANY (ARRAY[ | ||
',academia,chorke.org,shahed.biz,' | ',academia,chorke.org,shahed.biz,' | ||
]) ORDER BY id ASC; | ]) ORDER BY id ASC; | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT * FROM user | SELECT * FROM user | ||
WHERE owner_ref LIKE ALL (ARRAY[ | WHERE owner_ref LIKE ALL (ARRAY[ | ||
',academia,chorke.org,shahed.biz,' | ',academia,chorke.org,shahed.biz,' | ||
]) ORDER BY id ASC; | ]) ORDER BY id ASC; | ||
</ | </syntaxhighlight> | ||
==JSON Query== | ==JSON Query== | ||
< | <syntaxhighlight lang="json"> | ||
{ | { | ||
"type": "Reseller", | "type": "Reseller", | ||
Line 136: | Line 377: | ||
} | } | ||
} | } | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT extended_properties ->'commission'->'value' | SELECT extended_properties ->'commission'->'value' | ||
FROM seller WHERE extended_properties ->>'type' = 'Reseller'; | FROM seller WHERE extended_properties ->>'type' = 'Reseller'; | ||
Line 153: | Line 394: | ||
SET extended_properties = jsonb_set(extended_properties, '{commission, type}', '"Bonus"') | SET extended_properties = jsonb_set(extended_properties, '{commission, type}', '"Bonus"') | ||
WHERE id = 1000 AND extended_properties ->> 'type' = 'Reseller'; | WHERE id = 1000 AND extended_properties ->> 'type' = 'Reseller'; | ||
</ | </syntaxhighlight> | ||
---- | |||
{| | {| | ||
| valign="top" | | | valign="top" | | ||
'''JSON''' | '''JSON''' | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT id, name FROM json_to_record ( | SELECT id, name FROM json_to_record ( | ||
'{"id":1,"name":"Academia"}'::json | '{"id":1,"name":"Academia"}'::json | ||
) AS concern(id int, name text); | ) AS concern(id int, name text); | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT * FROM json_array_elements_text('[ | SELECT * FROM json_array_elements_text('[ | ||
{"id":1,"name":"Academia"}, | {"id":1,"name":"Academia"}, | ||
Line 170: | Line 411: | ||
{"id":3,"name":"Software"} | {"id":3,"name":"Software"} | ||
]'::json); | ]'::json); | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT id, name FROM json_array_elements_text('[ | SELECT id, name FROM json_array_elements_text('[ | ||
{"id": 1, "name": "Academia"}, | {"id": 1, "name": "Academia"}, | ||
Line 180: | Line 421: | ||
json_to_record(concerns_text::json) | json_to_record(concerns_text::json) | ||
AS concerns_json(id int, name text); | AS concerns_json(id int, name text); | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT id, name FROM json_array_elements('[ | SELECT id, name FROM json_array_elements('[ | ||
{"id": 1, "name": "Academia"}, | {"id": 1, "name": "Academia"}, | ||
Line 190: | Line 431: | ||
json_to_record(concerns_array) | json_to_record(concerns_array) | ||
AS concerns_rows(id int, name text); | AS concerns_rows(id int, name text); | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT id, name FROM json_to_recordset('{"data": [ | SELECT id, name FROM json_to_recordset('{"data": [ | ||
{"concerns": {"id": 1, "name": "Academia"}}, | {"concerns": {"id": 1, "name": "Academia"}}, | ||
Line 199: | Line 440: | ||
]}'::json -> 'data') as r(concerns json), | ]}'::json -> 'data') as r(concerns json), | ||
json_to_record(concerns) AS concern(id int, name text); | json_to_record(concerns) AS concern(id int, name text); | ||
</ | </syntaxhighlight> | ||
| valign="top" | | | valign="top" | | ||
'''JSONB''' | '''JSONB''' | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT id, name FROM jsonb_to_record ( | SELECT id, name FROM jsonb_to_record ( | ||
'{"id":1,"name":"Academia"}'::jsonb | '{"id":1,"name":"Academia"}'::jsonb | ||
) AS concern(id int, name text); | ) AS concern(id int, name text); | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT * FROM jsonb_array_elements_text('[ | SELECT * FROM jsonb_array_elements_text('[ | ||
{"id":1,"name":"Academia"}, | {"id":1,"name":"Academia"}, | ||
Line 215: | Line 456: | ||
{"id":3,"name":"Software"} | {"id":3,"name":"Software"} | ||
]'::jsonb); | ]'::jsonb); | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT id, name FROM jsonb_array_elements_text('[ | SELECT id, name FROM jsonb_array_elements_text('[ | ||
{"id": 1, "name": "Academia"}, | {"id": 1, "name": "Academia"}, | ||
Line 225: | Line 466: | ||
jsonb_to_record(concerns_text::jsonb) | jsonb_to_record(concerns_text::jsonb) | ||
AS concerns_json(id int, name text); | AS concerns_json(id int, name text); | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT id, name FROM jsonb_array_elements('[ | SELECT id, name FROM jsonb_array_elements('[ | ||
{"id": 1, "name": "Academia"}, | {"id": 1, "name": "Academia"}, | ||
Line 235: | Line 476: | ||
jsonb_to_record(concerns_array) | jsonb_to_record(concerns_array) | ||
AS concerns_rows(id int, name text); | AS concerns_rows(id int, name text); | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT id, name FROM jsonb_to_recordset('{"data": [ | SELECT id, name FROM jsonb_to_recordset('{"data": [ | ||
{"concerns": {"id": 1, "name": "Academia"}}, | {"concerns": {"id": 1, "name": "Academia"}}, | ||
Line 244: | Line 485: | ||
]}'::jsonb -> 'data') as r(concerns jsonb), | ]}'::jsonb -> 'data') as r(concerns jsonb), | ||
jsonb_to_record(concerns) AS concern(id int, name text); | jsonb_to_record(concerns) AS concern(id int, name text); | ||
</ | </syntaxhighlight> | ||
| valign="top" | | | valign="top" | | ||
Line 281: | Line 522: | ||
(3 rows) | (3 rows) | ||
|} | |} | ||
---- | |||
===Insert=== | |||
<syntaxhighlight lang="sql"> | |||
-- query » group : religion | |||
-- ----------------------------------------------------------------------------- | |||
-- SELECT MAX(id) FROM grouped_collection; | |||
-- SELECT * FROM grouped_collection WHERE group_name = 'religion'; | |||
-- DELETE FROM grouped_collection WHERE group_name = 'religion'; | |||
-- ----------------------------------------------------------------------------- | |||
WITH mixed_grouped_collection AS ( | |||
SELECT | |||
cast('chorke.org' AS varchar) "domain", | |||
cast('religion' AS varchar) "group_name", | |||
cast(raw_grouped_collection->>0 AS varchar) "code", | |||
cast(raw_grouped_collection->>1 AS varchar) "name", | |||
cast(raw_grouped_collection->>2 AS jsonb ) "extended_properties", | |||
cast(raw_grouped_collection->>3 AS numeric) "order_coll" | |||
FROM jsonb_array_elements('[ | |||
["I", "Islam", {}, 1], | |||
["B", "Buddhist", {}, 2], | |||
["H", "Hindu", {}, 3], | |||
["C", "Christian", {}, 4], | |||
["O", "Others", {}, 5] | |||
]'::jsonb) AS raw_grouped_collection | |||
) | |||
, new_grouped_collection AS ( | |||
SELECT | |||
ROW_NUMBER() OVER() "id", LOCALTIMESTAMP "created_on", | |||
mgc.domain, mgc.code, mgc.name, mgc.group_name, | |||
mgc.extended_properties, mgc.order_coll | |||
FROM mixed_grouped_collection mgc | |||
-- prevent old duplicate | |||
WHERE NOT EXISTS ( | |||
SELECT id FROM grouped_collection | |||
WHERE (code, group_name, domain) = (mgc.code, mgc.group_name, mgc.domain) | |||
) | |||
) | |||
SELECT ngc.* FROM new_grouped_collection ngc; | |||
-- INSERT INTO grouped_collection (id, created_on, domain, code, name, group_name, extended_properties, order_coll) | |||
-- SELECT (SELECT (COALESCE(MAX(id), 0) + ngc.id) FROM grouped_collection), | |||
-- ngc.created_on, ngc.domain, ngc.code, ngc.name, ngc.group_name, | |||
-- ngc.extended_properties, ngc.order_coll | |||
-- FROM new_grouped_collection ngc | |||
-- -- prevent new duplicate | |||
-- WHERE NOT EXISTS ( | |||
-- SELECT id FROM grouped_collection | |||
-- WHERE (code, group_name, domain) = (ngc.code, ngc.group_name, ngc.domain) | |||
-- ) | |||
-- ORDER BY (ngc.order_coll, ngc.id) ASC; | |||
</syntaxhighlight> | |||
==Delete Join== | ==Delete Join== | ||
< | <syntaxhighlight lang="sql"> | ||
-- USING is not an ANSI standard and is not supported by other RDBMS | -- USING is not an ANSI standard and is not supported by other RDBMS | ||
-- it's better to use a subquery rather than USING for the join | -- it's better to use a subquery rather than USING for the join | ||
Line 289: | Line 580: | ||
WHERE p.category_id = c.id AND | WHERE p.category_id = c.id AND | ||
c.name = 'Grocery'; | c.name = 'Grocery'; | ||
</ | </syntaxhighlight> | ||
==Sequence== | ==Sequence== | ||
< | <syntaxhighlight lang="sql"> | ||
CREATE SEQUENCE IF NOT EXISTS public.policy_1000 | CREATE SEQUENCE IF NOT EXISTS public.policy_1000 | ||
INCREMENT 1 | INCREMENT 1 | ||
Line 307: | Line 598: | ||
ALTER SEQUENCE IF EXISTS policy_1000 RESTART 1; | ALTER SEQUENCE IF EXISTS policy_1000 RESTART 1; | ||
SELECT NEXTVAL('policy_1000')::int AS policy_id; | SELECT NEXTVAL('policy_1000')::int AS policy_id; | ||
</ | </syntaxhighlight> | ||
---- | ---- | ||
< | <syntaxhighlight lang="sql"> | ||
GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO chorke_init_pro; | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO chorke_init_pro; | ||
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO chorke_init_pro; | ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO chorke_init_pro; | ||
</ | </syntaxhighlight> | ||
==Echo/Print== | |||
<syntaxhighlight lang="sql"> | |||
DO $BODY$ | |||
DECLARE | |||
p_jsonb jsonb; | |||
BEGIN | |||
-- SELECT INTO p_jsonb '["", "template","{templateType}","{productSlug}"]'::jsonb || '["{filePath}"]'::jsonb; | |||
p_jsonb := '["", "template","{templateType}","{productSlug}"]'::jsonb || '["{filePath}"]'::jsonb; | |||
RAISE NOTICE '%', p_jsonb; | |||
END; | |||
$BODY$; | |||
</syntaxhighlight> | |||
==Execute DDL== | ==Execute DDL== | ||
===Insert Using Loop=== | |||
<syntaxhighlight lang="sql"> | |||
DO $BODY$ | |||
DECLARE | |||
p_id integer; | |||
p_domain varchar:= 'chorke.org'; | |||
p_group_name varchar:= 'occupation_class'; | |||
BEGIN | |||
SELECT INTO p_id (COALESCE(MAX(id), 0)) FROM grouped_collection; | |||
FOR p_code IN 1..5 LOOP | |||
INSERT INTO grouped_collection (id, created_on, domain, code, name, group_name, extended_properties) | |||
VALUES ((p_id + p_code), LOCALTIMESTAMP, p_domain, p_code, p_code, p_group_name, '{}'); | |||
END LOOP; | |||
END; | |||
$BODY$; | |||
</syntaxhighlight> | |||
===Anonymous Blocks=== | ===Anonymous Blocks=== | ||
< | <syntaxhighlight lang="sql"> | ||
DO | DO | ||
$BODY$ | $BODY$ | ||
Line 333: | Line 654: | ||
con.conname LIKE 'uk_%'; | con.conname LIKE 'uk_%'; | ||
IF fiscal_year_ukc IS NOT NULL THEN | |||
fiscal_drop_ukc := fiscal_drop_ukc || ' ' | |||
'DROP CONSTRAINT ' || fiscal_year_ukc; | |||
EXECUTE fiscal_drop_ukc; | |||
END IF; | |||
END | END | ||
$BODY$; | $BODY$; | ||
</ | </syntaxhighlight> | ||
===Create Sequence=== | ===Create Sequence=== | ||
< | <syntaxhighlight lang="sql"> | ||
CREATE OR REPLACE FUNCTION fn_get_policy_no(IN agent_no character varying) | CREATE OR REPLACE FUNCTION fn_get_policy_no(IN agent_no character varying) | ||
RETURNS integer | RETURNS integer | ||
Line 369: | Line 690: | ||
SELECT fn_get_policy_no('0001'); | SELECT fn_get_policy_no('0001'); | ||
</ | </syntaxhighlight> | ||
===Create Sequence with Comment=== | ===Create Sequence with Comment=== | ||
< | <syntaxhighlight lang="sql"> | ||
CREATE OR REPLACE FUNCTION fn_get_policy_no(IN agent_no character varying, IN agent_desc character varying) | CREATE OR REPLACE FUNCTION fn_get_policy_no(IN agent_no character varying, IN agent_desc character varying) | ||
RETURNS integer | RETURNS integer | ||
Line 392: | Line 713: | ||
EXECUTE create_sequence; | EXECUTE create_sequence; | ||
EXECUTE comment_sequence; | |||
SELECT INTO policy_no NEXTVAL(sequence_name)::int; | SELECT INTO policy_no NEXTVAL(sequence_name)::int; | ||
RETURN policy_no; | RETURN policy_no; | ||
Line 401: | Line 722: | ||
-- @Query(value="SELECT fn_get_policy_no(:agent_no, :agent_desc)", nativeQuery = true) | -- @Query(value="SELECT fn_get_policy_no(:agent_no, :agent_desc)", nativeQuery = true) | ||
-- Integer getPolicyNo(@Param("agent_no") String agentNo, @Param("agent_desc") String agentDesc); | -- Integer getPolicyNo(@Param("agent_no") String agentNo, @Param("agent_desc") String agentDesc); | ||
</ | </syntaxhighlight> | ||
===Create From Table Exclude Column=== | ===Create From Table Exclude Column=== | ||
< | <syntaxhighlight lang="sql"> | ||
DO | DO | ||
$BODY$ | $BODY$ | ||
Line 416: | Line 737: | ||
END | END | ||
$BODY$; | $BODY$; | ||
</ | </syntaxhighlight> | ||
==Date & Time== | ==Date & Time== | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT * FROM policy | SELECT * FROM policy | ||
WHERE DATE_TRUNC('day', created_on) = TO_DATE('2021-12-31', 'yyyy-mm-dd'); | WHERE DATE_TRUNC('day', created_on) = TO_DATE('2021-12-31', 'yyyy-mm-dd'); | ||
Line 425: | Line 746: | ||
UPDATE policy SET deleted_on = LOCALTIMESTAMP | UPDATE policy SET deleted_on = LOCALTIMESTAMP | ||
WHERE DATE_TRUNC('day', created_on) = TO_DATE('2021-12-31', 'yyyy-mm-dd'); | WHERE DATE_TRUNC('day', created_on) = TO_DATE('2021-12-31', 'yyyy-mm-dd'); | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT EXTRACT(CENTURY FROM TIMESTAMP '2013-10-10 10:10:10'); | SELECT EXTRACT(CENTURY FROM TIMESTAMP '2013-10-10 10:10:10'); | ||
SELECT DATE_TRUNC('year', TIMESTAMP '2013-10-10 10:10:10'); | SELECT DATE_TRUNC('year', TIMESTAMP '2013-10-10 10:10:10'); | ||
Line 446: | Line 767: | ||
SELECT CURRENT_TIME; | SELECT CURRENT_TIME; | ||
SELECT CURRENT_DATE; | SELECT CURRENT_DATE; | ||
</ | </syntaxhighlight> | ||
==String Format== | |||
<syntaxhighlight lang="sql"> | |||
SELECT STRING_TO_ARRAY('/my/request/path', '/') "string_array"; | |||
SELECT ARRAY_TO_JSON(STRING_TO_ARRAY('/my/request/path', '/')) "json_array"; | |||
</syntaxhighlight> | |||
==Number Format== | |||
<syntaxhighlight lang="sql"> | |||
SELECT NULLIF(regexp_replace('CKI00109' , '\D','','g'), '')::numeric; | |||
SELECT NULLIF(regexp_replace('CKI0010#9####', '\D','','g'), '')::numeric; | |||
SELECT NULLIF(regexp_replace('CKI2203000075', '\D','','g'), '')::numeric; | |||
</syntaxhighlight> | |||
==Restoring the Dump== | ==Restoring the Dump== | ||
Line 453: | Line 787: | ||
pg_dump academia| gzip > academia_2020021.sql.gz | pg_dump academia| gzip > academia_2020021.sql.gz | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT pg_terminate_backend(pid) | SELECT pg_terminate_backend(pid) | ||
FROM pg_stat_activity WHERE datname IN | FROM pg_stat_activity WHERE datname IN | ||
Line 461: | Line 795: | ||
DROP DATABASE IF EXISTS academia; | DROP DATABASE IF EXISTS academia; | ||
CREATE DATABASE academia; | CREATE DATABASE academia; | ||
</ | </syntaxhighlight> | ||
'''restore dumps''' | '''restore dumps''' | ||
Line 473: | Line 807: | ||
==OS User Auth== | ==OS User Auth== | ||
< | <syntaxhighlight lang="sql"> | ||
CREATE DATABASE academia; | CREATE DATABASE academia; | ||
CREATE USER academia WITH LOGIN; | CREATE USER academia WITH LOGIN; | ||
GRANT ALL PRIVILEGES ON DATABASE academia TO academia; | GRANT ALL PRIVILEGES ON DATABASE academia TO academia; | ||
-- CREATE USER academia WITH ENCRYPTED PASSWORD 'sadaqah!'; | -- CREATE USER academia WITH ENCRYPTED PASSWORD 'sadaqah!'; | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="bash"> | ||
echo "os-map academia academia" >> $PG_DATA/pg_ident.conf | echo "os-map academia academia" >> $PG_DATA/pg_ident.conf | ||
echo "host all all 127.0.0.1/32 ident map=os-map" >> $PG_DATA/pg_hba.conf | echo "host all all 127.0.0.1/32 ident map=os-map" >> $PG_DATA/pg_hba.conf | ||
pg_ctl --pgdata="$PG_DATA" reload; | pg_ctl --pgdata="$PG_DATA" reload; | ||
</ | </syntaxhighlight> | ||
==DBLink POC== | |||
<syntaxhighlight lang="sql"> | |||
CREATE EXTENSION dblink; | |||
CREATE EXTENSION dblink SCHEMA extensions; | |||
SELECT * FROM pg_available_extensions; | |||
SELECT * FROM pg_available_extensions WHERE name = 'dblink'; | |||
</syntaxhighlight> | |||
---- | |||
<syntaxhighlight lang="sql"> | |||
-- using pgsql block | |||
DO | |||
$BODY$ | |||
DECLARE | |||
p_db_host varchar:= host(inet_server_addr()); | |||
p_db_port varchar:= inet_server_port(); | |||
p_db_user varchar:= CURRENT_USER; | |||
p_db_pass varchar:= 'p@$$w0rd'; | |||
p_db_name varchar:= 'postgres'; | |||
p_db_extn_name varchar; | |||
p_db_exec_query varchar; | |||
p_db_conn_format varchar:= 'hostaddr=%s port=%s dbname=%s user=%s password=%s'; | |||
p_db_conn_string varchar:= format(p_db_conn_format, p_db_host, p_db_port, p_db_name, p_db_user, p_db_pass); | |||
BEGIN | |||
p_db_exec_query := 'SELECT name FROM pg_available_extensions WHERE name = ''dblink'''; | |||
SELECT INTO p_db_extn_name name FROM dblink(p_db_conn_string, p_db_exec_query) AS extensions(name varchar); | |||
RAISE NOTICE 'Extension Name: %', p_db_extn_name; | |||
END | |||
$BODY$; | |||
</syntaxhighlight> | |||
---- | |||
<syntaxhighlight lang="sql"> | |||
-- using pgsql query | |||
WITH user_info AS ( | |||
SELECT | |||
'hostaddr=%s port=%s dbname=%s user=%s password=%s' "db_conn_format", | |||
host(inet_server_addr()) "db_host", | |||
inet_server_port() "db_port", | |||
CURRENT_USER "db_user", | |||
'p@$$w0rd' "db_pass", | |||
'postgres' "db_name" | |||
) | |||
, conn_info AS ( | |||
SELECT | |||
db_conn_format, | |||
format(db_conn_format, db_host, db_port, db_name, db_user, db_pass) "db_conn_string", | |||
'SELECT name FROM pg_available_extensions WHERE name = ''dblink''' "db_exec_query" | |||
FROM user_info | |||
) | |||
SELECT * FROM dblink( | |||
(SELECT db_conn_string FROM conn_info), | |||
(SELECT db_exec_query FROM conn_info) | |||
) AS extensions(name varchar) | |||
</syntaxhighlight> | |||
==DBLink Exec== | ==DBLink Exec== | ||
< | <syntaxhighlight lang="sql"> | ||
DO | DO | ||
$BODY$ | $BODY$ | ||
Line 496: | Line 886: | ||
END | END | ||
$BODY$; | $BODY$; | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
DO | DO | ||
$BODY$ | $BODY$ | ||
Line 526: | Line 916: | ||
END | END | ||
$BODY$; | $BODY$; | ||
</ | </syntaxhighlight> | ||
==Recursive Query== | |||
<syntaxhighlight lang="sql"> | |||
WITH RECURSIVE hierarchy AS ( | |||
SELECT | |||
o.id, | |||
o.code, o.name, | |||
o.parent "parent_code", | |||
p.name "parent_name" | |||
FROM org_unit o | |||
LEFT JOIN org_unit p ON p.code = o.parent | |||
WHERE o.code = LOWER(REPLACE('Chorke Agency', ' ', '-')) | |||
UNION | |||
SELECT | |||
o.id, | |||
o.code, o.name, | |||
o.parent "parent_code", | |||
p.name "parent_name" | |||
FROM org_unit o | |||
LEFT JOIN org_unit p ON p.code = o.parent | |||
INNER JOIN hierarchy h ON h.parent_code = o.code | |||
) | |||
SELECT * FROM hierarchy; | |||
</syntaxhighlight> | |||
==Encode/Decode== | ==Encode/Decode== | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT decode('Chorke Academia, Inc.', 'escape'); -- text to binary | SELECT decode('Chorke Academia, Inc.', 'escape'); -- text to binary | ||
SELECT encode('Chorke Academia, Inc.'::bytea, 'escape'); -- binary to text | SELECT encode('Chorke Academia, Inc.'::bytea, 'escape'); -- binary to text | ||
Line 538: | Line 952: | ||
SELECT encode('Chorke Academia, Inc.'::bytea, 'base64'); -- text to base64 | SELECT encode('Chorke Academia, Inc.'::bytea, 'base64'); -- text to base64 | ||
SELECT convert_from(decode('Q2hvcmtlIEFjYWRlbWlhLCBJbmMu', 'base64'), 'UTF8'); -- base64 to text | SELECT convert_from(decode('Q2hvcmtlIEFjYWRlbWlhLCBJbmMu', 'base64'), 'UTF8'); -- base64 to text | ||
</ | </syntaxhighlight> | ||
---- | ---- | ||
'''Base36 Encode''' | '''Base36 Encode''' | ||
< | <syntaxhighlight lang="sql"> | ||
CREATE OR REPLACE FUNCTION fn_base36_encode(IN base10 bigint) | CREATE OR REPLACE FUNCTION fn_base36_encode(IN base10 bigint) | ||
RETURNS varchar | RETURNS varchar | ||
Line 561: | Line 975: | ||
END | END | ||
$BODY$; | $BODY$; | ||
</ | </syntaxhighlight> | ||
---- | ---- | ||
'''Base36 Decode''' | '''Base36 Decode''' | ||
< | <syntaxhighlight lang="sql"> | ||
CREATE OR REPLACE FUNCTION fn_base36_decode(IN base36 varchar) | CREATE OR REPLACE FUNCTION fn_base36_decode(IN base36 varchar) | ||
RETURNS bigint | RETURNS bigint | ||
Line 591: | Line 1,005: | ||
END | END | ||
$BODY$; | $BODY$; | ||
</ | </syntaxhighlight> | ||
==Large Object== | ==Large Object== | ||
\dfS lo_* | \dfS lo_* | ||
< | <syntaxhighlight lang="sql"> | ||
-- lo_open(lobjId oid, mode integer) returns integer | -- lo_open(lobjId oid, mode integer) returns integer | ||
-- The mode parameter to lo_open uses two constants: | -- The mode parameter to lo_open uses two constants: | ||
Line 607: | Line 1,021: | ||
SELECT x'60000'::int; -- SELECT cast(x'60000' AS integer); -- 393216 | SELECT x'60000'::int; -- SELECT cast(x'60000' AS integer); -- 393216 | ||
SELECT (x'20000'|x'40000')::int -- SELECT cast(x'20000'|x'40000' AS integer); -- 393216 | SELECT (x'20000'|x'40000')::int -- SELECT cast(x'20000'|x'40000' AS integer); -- 393216 | ||
</ | </syntaxhighlight> | ||
---- | ---- | ||
< | <syntaxhighlight lang="sql"> | ||
-- example: a | -- example: a | ||
SELECT lo_get(photo::oid), lo_get(resume::oid) FROM employee; -- select large object as binary data | SELECT lo_get(photo::oid), lo_get(resume::oid) FROM employee; -- select large object as binary data | ||
SELECT lo_from_bytea(0, 'Chorke Academia, Inc.'::bytea) FROM employee; -- create large object from ascii text | SELECT lo_from_bytea(0, 'Chorke Academia, Inc.'::bytea) FROM employee; -- create large object from ascii text | ||
SELECT lo_from_bytea(0, convert_to('Chorke Academia, Inc.', 'UTF8')) FROM employee; -- create large object from utf-8 text | SELECT lo_from_bytea(0, convert_to('Chorke Academia, Inc.', 'UTF8')) FROM employee; -- create large object from utf-8 text | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
-- example: b | -- example: b | ||
SELECT resume FROM profile; -- oid | SELECT resume FROM profile; -- oid | ||
Line 622: | Line 1,036: | ||
SELECT encode(lo_get(resume::oid), 'escape') FROM profile; -- binary to text | SELECT encode(lo_get(resume::oid), 'escape') FROM profile; -- binary to text | ||
SELECT encode(lo_get(resume::oid), 'base64') FROM profile; -- binary to base64 | SELECT encode(lo_get(resume::oid), 'base64') FROM profile; -- binary to base64 | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
-- example: c | -- example: c | ||
SELECT convert_from(lo_get(resume::oid), 'UTF8') FROM profile; | SELECT convert_from(lo_get(resume::oid), 'UTF8') FROM profile; | ||
Line 631: | Line 1,045: | ||
SELECT lowrite(lo_open(lo_creat(-1), x'60000'::int), convert_to('Chorke Academia, Inc.', 'UTF8')) FROM profile; -- create large object | SELECT lowrite(lo_open(lo_creat(-1), x'60000'::int), convert_to('Chorke Academia, Inc.', 'UTF8')) FROM profile; -- create large object | ||
SELECT lowrite(lo_open(resume::oid, x'60000'::int), convert_to('Chorke Academia, Inc.', 'UTF8')) FROM profile; -- update large object | SELECT lowrite(lo_open(resume::oid, x'60000'::int), convert_to('Chorke Academia, Inc.', 'UTF8')) FROM profile; -- update large object | ||
</ | </syntaxhighlight> | ||
---- | ---- | ||
===Insert=== | ===Insert=== | ||
< | <syntaxhighlight lang="sql"> | ||
-- right way to insert in insert | -- right way to insert in insert | ||
INSERT INTO employee(id, resume, name) VALUES( | INSERT INTO employee(id, resume, name) VALUES( | ||
Line 641: | Line 1,055: | ||
'Chorke Academia, Inc.' | 'Chorke Academia, Inc.' | ||
); | ); | ||
</ | </syntaxhighlight> | ||
===Select=== | ===Select=== | ||
< | <syntaxhighlight lang="sql"> | ||
-- right way to select in select | -- right way to select in select | ||
SELECT | SELECT | ||
Line 653: | Line 1,067: | ||
convert_from(lo_get(resume::oid), 'UTF8') AS resume_text | convert_from(lo_get(resume::oid), 'UTF8') AS resume_text | ||
FROM employee; | FROM employee; | ||
</ | </syntaxhighlight> | ||
===Update=== | ===Update=== | ||
< | <syntaxhighlight lang="sql"> | ||
-- right way to create in update | -- right way to create in update | ||
-- wrong way to update in update | -- wrong way to update in update | ||
Line 671: | Line 1,085: | ||
-- we need to delete manually and which one might be disaster | -- we need to delete manually and which one might be disaster | ||
-- SELECT lo_unlink(orphan_lo::oid); | -- SELECT lo_unlink(orphan_lo::oid); | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
-- right way to update in update | -- right way to update in update | ||
-- SELECT oid FROM pg_largeobject_metadata; | -- SELECT oid FROM pg_largeobject_metadata; | ||
Line 698: | Line 1,112: | ||
-- SELECT oid FROM pg_largeobject_metadata; | -- SELECT oid FROM pg_largeobject_metadata; | ||
-- SELECT * FROM employee | -- SELECT * FROM employee | ||
</ | </syntaxhighlight> | ||
===Delete=== | ===Delete=== | ||
< | <syntaxhighlight lang="sql"> | ||
-- wrong way to unlink in delete | -- wrong way to unlink in delete | ||
-- SELECT oid FROM pg_largeobject_metadata; | -- SELECT oid FROM pg_largeobject_metadata; | ||
Line 712: | Line 1,126: | ||
-- manually and which one might be disaster | -- manually and which one might be disaster | ||
-- SELECT lo_unlink(orphan_lo::oid); | -- SELECT lo_unlink(orphan_lo::oid); | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
-- right way to unlink in delete | -- right way to unlink in delete | ||
-- SELECT oid FROM pg_largeobject_metadata; | -- SELECT oid FROM pg_largeobject_metadata; | ||
Line 736: | Line 1,150: | ||
-- SELECT oid FROM pg_largeobject_metadata; | -- SELECT oid FROM pg_largeobject_metadata; | ||
-- SELECT * FROM employee; | -- SELECT * FROM employee; | ||
</ | </syntaxhighlight> | ||
===Alter=== | ===Alter=== | ||
< | <syntaxhighlight lang="sql"> | ||
ALTER TABLE employee | ALTER TABLE employee | ||
-- ALTER COLUMN photo TYPE bytea USING lo_get(photo::oid); | -- ALTER COLUMN photo TYPE bytea USING lo_get(photo::oid); | ||
ALTER COLUMN photo TYPE text USING encode(lo_get(photo::oid), 'base64'); | ALTER COLUMN photo TYPE text USING encode(lo_get(photo::oid), 'base64'); | ||
</ | </syntaxhighlight> | ||
==Bastion Dump== | |||
<syntaxhighlight lang='bash' highlight='6,8,12' line> | |||
# Template of a remote-dump script: hop over SSH through the bastion (gtw)
# to the app host and run pg_dump there. The two '%s' printf placeholders
# are filled in later with the DB password and the database name.
# The echo pairs '/* $STDOUT$' ... '$STDOUT$ */' wrap all non-dump shell
# chatter in SQL block comments so the captured stream stays loadable;
# the sed post-processing step strips those marker sections afterwards.
BASH_REMOTE_SCRIPT_FRMT=$(cat <<'EOF'
# suppress stdout
echo '/* $STDOUT$'
echo "${HOSTNAME}${HOME}"
ssh -qt gtw.vpc.chorke.org <<'EOF_00'
echo "${HOSTNAME}${HOME}"
ssh -qt app.vpc.chorke.org <<'EOF_01'
echo "${HOSTNAME}${HOME}"
echo '$STDOUT$ */'
pg_dump postgres://academia:%[email protected]:5432/%s
# suppress stdout
echo '/* $STDOUT$'
EOF_01
EOF_00
echo '$STDOUT$ */'
EOF
)
</syntaxhighlight> | |||
---- | |||
<syntaxhighlight lang='bash' start='21' highlight='5' line> | |||
# Password and database that fill the '%s' placeholders of the template.
BASH_REMOTE_SCRIPT_PASS='sadaqah!'
BASH_REMOTE_SCRIPT_PGDB='academia_keycloak_staging'
# printf -v stores the substituted script in a variable instead of printing it.
printf -v BASH_REMOTE_SCRIPT_EXEC "${BASH_REMOTE_SCRIPT_FRMT}" "${BASH_REMOTE_SCRIPT_PASS}" "${BASH_REMOTE_SCRIPT_PGDB}"
# Dump file is named <database>.sql
printf -v BASH_EXPORT_SCRIPT_DUMP '%s.sql' "${BASH_REMOTE_SCRIPT_PGDB}"
# Run the assembled remote script locally; stdout is the SQL dump.
bash -c "${BASH_REMOTE_SCRIPT_EXEC}" > ${BASH_EXPORT_SCRIPT_DUMP}
</syntaxhighlight> | |||
---- | |||
<syntaxhighlight lang='bash' start='27' highlight='1-3' line> | |||
# Delete every line between a '/* $STDOUT$' marker and its matching
# '$STDOUT$ */' marker (the suppressed shell chatter), keeping the markers.
sed '/^\/\* \$STDOUT\$$/,/^\$STDOUT\$ \*\/$/{/^\/\* \$STDOUT\$$/!{/^\$STDOUT\$ \*\/$/!d}}' -i ${BASH_EXPORT_SCRIPT_DUMP}
# Remove the now-empty marker pairs themselves (with a preceding newline,
# then the leading pair at the top of the file). -z treats the file as one
# NUL-separated record so the patterns can match across newlines.
sed -z 's|\n/\* $STDOUT$\n$STDOUT$ \*/||g' -i ${BASH_EXPORT_SCRIPT_DUMP}
sed -z 's|/\* $STDOUT$\n$STDOUT$ \*/\n||g' -i ${BASH_EXPORT_SCRIPT_DUMP}
# Compress the cleaned dump.
gzip ${BASH_EXPORT_SCRIPT_DUMP}
</syntaxhighlight> | |||
==Export CSV== | |||
<syntaxhighlight lang='bash'> | |||
psql -U academia academia_audit_staging | |||
\COPY (SELECT * FROM audit_log WHERE action_type = 'Sync Collection' AND DATE_TRUNC('day', logged_on) = '20241010')\ | |||
To '~/.config/audit_log_20241010_1010_MYT.csv' With CSV DELIMITER ',' HEADER; | |||
</syntaxhighlight> | |||
== Knowledge == | == Knowledge == | ||
Line 755: | Line 1,214: | ||
sed -i -e 's|^-- ||g' src/main/resources/db/migration/*.sql | sed -i -e 's|^-- ||g' src/main/resources/db/migration/*.sql | ||
< | <syntaxhighlight lang="sql"> | ||
-- psql | -- psql | ||
CREATE DATABASE chorke_init_pro; | CREATE DATABASE chorke_init_pro; | ||
Line 761: | Line 1,220: | ||
GRANT ALL PRIVILEGES ON DATABASE chorke_init_pro TO chorke_init_pro; | GRANT ALL PRIVILEGES ON DATABASE chorke_init_pro TO chorke_init_pro; | ||
-- \q | -- \q | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
-- revoke public connect from specific database after creation | -- revoke public connect from specific database after creation | ||
REVOKE CONNECT ON DATABASE academia_ebis_dev FROM PUBLIC; | REVOKE CONNECT ON DATABASE academia_ebis_dev FROM PUBLIC; | ||
-- revoke public connect for all future databases | -- revoke public connect for all future databases | ||
REVOKE CONNECT ON DATABASE template1 FROM PUBLIC; | REVOKE CONNECT ON DATABASE template1 FROM PUBLIC; | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT | SELECT | ||
con.conname, | con.conname, | ||
Line 782: | Line 1,241: | ||
rel.relname = 'fiscal_year' AND | rel.relname = 'fiscal_year' AND | ||
con.conname LIKE 'uk_%'; | con.conname LIKE 'uk_%'; | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT | SELECT | ||
n.nspname AS "schema", | n.nspname AS "schema", | ||
Line 808: | Line 1,267: | ||
AND pg_catalog.pg_table_is_visible(c.oid) | AND pg_catalog.pg_table_is_visible(c.oid) | ||
ORDER BY 1,2; | ORDER BY 1,2; | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT d.datname AS "db_name", | SELECT d.datname AS "db_name", | ||
pg_catalog.pg_get_userbyid(d.datdba) AS "db_owner", | pg_catalog.pg_get_userbyid(d.datdba) AS "db_owner", | ||
Line 827: | Line 1,286: | ||
JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid | JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid | ||
ORDER BY 1; | ORDER BY 1; | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
SELECT | SELECT | ||
usesysid AS "user_id", | usesysid AS "user_id", | ||
Line 836: | Line 1,295: | ||
usesuper AS "is_super_user" | usesuper AS "is_super_user" | ||
FROM pg_catalog.pg_user ORDER BY 1; | FROM pg_catalog.pg_user ORDER BY 1; | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
DO | DO | ||
$BODY$ | $BODY$ | ||
Line 853: | Line 1,312: | ||
END | END | ||
$BODY$; | $BODY$; | ||
</ | </syntaxhighlight> | ||
< | <syntaxhighlight lang="sql"> | ||
cat << EOF | psql -U postgres | cat << EOF | psql -U postgres | ||
DROP DATABASE IF EXISTS academia_ebis_staging; | DROP DATABASE IF EXISTS academia_ebis_staging; | ||
Line 862: | Line 1,321: | ||
CREATE USER academia WITH ENCRYPTED PASSWORD 'sadaqah!'; | CREATE USER academia WITH ENCRYPTED PASSWORD 'sadaqah!'; | ||
GRANT ALL PRIVILEGES ON DATABASE academia_ebis_staging TO academia; | GRANT ALL PRIVILEGES ON DATABASE academia_ebis_staging TO academia; | ||
ALTER USER academia WITH SUPERUSER; | ALTER DATABASE academia_ebis_staging OWNER TO academia; | ||
-- ALTER USER academia WITH SUPERUSER; | |||
EOF | EOF | ||
</ | </syntaxhighlight> | ||
==Allow Remote== | |||
{| | |||
| valign="top" | | |||
apt install -y iputils-ping telnet dnsutils | |||
sudo systemctl status ufw | |||
sudo ufw status verbose | |||
| valign="top" | | |||
sudo ufw status numbered | |||
sudo ufw allow 5432/tcp | |||
sudo ufw enable | |||
|- | |||
| colspan="2" | | |||
---- | |||
|- | |||
| valign="bottom" | | |||
nano /etc/postgresql/14/main/postgresql.conf | |||
| valign="top" | | |||
nano /etc/postgresql/14/main/pg_hba.conf | |||
|- | |||
| colspan="2" | | |||
---- | |||
|- | |||
| valign="bottom" | | |||
<syntaxhighlight lang="ini" start="60" line> | |||
listen_addresses = '*' # what IP address(es) to listen on; | |||
# comma-separated list of addresses; | |||
# defaults to 'localhost'; use '*' for all | |||
# (change requires restart) | |||
port = 5432 # (change requires restart) | |||
</syntaxhighlight> | |||
| valign="top" | | |||
<syntaxhighlight lang="ini" start="96" line> | |||
# IPv4 local connections: | |||
host all all 127.0.0.1/32 scram-sha-256 | |||
host all all 0.0.0.0/0 scram-sha-256 | |||
# host all all 10.19.83.1/24 scram-sha-256 | |||
# host all all 10.20.13.1/24 scram-sha-256 | |||
</syntaxhighlight> | |||
|} | |||
==Spread Sheet== | |||
<syntaxhighlight lang="python"> | |||
=CONCAT("(LOCALTIMESTAMP, 'religion', 'chorke.org', (SELECT (COALESCE(MAX(id), 0) + 1) FROM grouped_collection), '", SUBSTITUTE(A2 , """" , ""), "', '", SUBSTITUTE(A2 , """" , ""), "', '{""state"": """ , SUBSTITUTE(B2 , "'" , "''") , """, ""city"": """ , SUBSTITUTE(C2 , "'" , "''") , """}'),") | |||
=CONCAT("[""", SUBSTITUTE(A2 , """", ""), """, """, SUBSTITUTE(A2, """", ""), "",""", {""state"": """ , SUBSTITUTE(B2 , "'" , "''") , """, ""city"": """ , SUBSTITUTE(C2 , "'" , "''") , """}", "],") | |||
=CONCAT("[""", SUBSTITUTE(C518 , """", ""), """, """, SUBSTITUTE(B518, """", ""), "",""", {}, " , A518 , ", ", IF(ISBLANK(D518), "false", "true"), "],") | |||
=CONCAT("[""", SUBSTITUTE(C5 , """", ""), """, """, SUBSTITUTE(B5, """", ""), "",""", {""class"": """, D5 ,""", ""income"": true}, " , A5, "],") | |||
=CONCAT("[""", SUBSTITUTE(C27 , """", ""), """, """, SUBSTITUTE(B27, """", ""), "",""", {}, " , A27, "],") | |||
=CONCAT("[""", C27, """, """, B27, "",""", {}, " , A27, "],") | |||
</syntaxhighlight> | |||
==Docker PSQL== | |||
{| | |||
| valign="top" | | |||
docker run -d --name postgres\ | |||
-e POSTGRES_PASSWORD=sadaqah\ | |||
-v ./init.sql:/docker-entrypoint-initdb.d/init.sql\ | |||
-p 127.0.0.1:5433:5432\ | |||
postgres:12.14 | |||
| valign="top" | | |||
docker exec -it postgres bash | |||
psql -U postgres | |||
docker stop postgres | |||
docker rm postgres | |||
| valign="top" | | |||
docker exec -it postgres\ | |||
psql -U postgres | |||
docker ps -a | |||
docker rm postgres -f | |||
|} | |||
==References == | ==References == | ||
Line 895: | Line 1,439: | ||
* [https://stackoverflow.com/questions/8833679/ <code>PL/pgSQL</code> Create dynamic table from string] | * [https://stackoverflow.com/questions/8833679/ <code>PL/pgSQL</code> Create dynamic table from string] | ||
* [https://www.postgresql.org/docs/current/plpgsql.html <code>PL/pgSQL</code> SQL Procedural Language] | * [https://www.postgresql.org/docs/current/plpgsql.html <code>PL/pgSQL</code> SQL Procedural Language] | ||
* [https://stackoverflow.com/questions/11892233/ <code>PL/pgSQL</code> Checking if a row exists] | |||
* [https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS <code>PL/pgSQL</code> Syntax Constants] | * [https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS <code>PL/pgSQL</code> Syntax Constants] | ||
* [https://www.postgresql.org/docs/current/contrib-dblink-connect.html <code>PL/pgSQL</code> DBLink Connect] | * [https://www.postgresql.org/docs/current/contrib-dblink-connect.html <code>PL/pgSQL</code> DBLink Connect] | ||
Line 945: | Line 1,490: | ||
|- | |- | ||
| valign="top" | | | valign="top" | | ||
* [https://stackoverflow.com/questions/24944347/ PostgreSQL Nested JSON Querying] | * [https://scalegrid.io/blog/using-jsonb-in-postgresql-how-to-effectively-store-index-json-data-in-postgresql/ PostgreSQL » Effectively Store & Index JSON Data] | ||
* [https://medium.com/@mnu/update-a-postgresql-table-using-a-with-query-648eefaae2a6 PostgreSQL » Update a table using a WITH query] | |||
* [https://stackoverflow.com/questions/24006291/ PostgreSQL » Return Result Set as JSON Array] | |||
* [https://dba.stackexchange.com/questions/251133/ PostgreSQL » Update all values in JSON Array] | |||
* [https://stackoverflow.com/questions/53777508/ PostgreSQL » pgAdmin » Export to a CSV File] | |||
* [https://stackoverflow.com/questions/44075557/ PostgreSQL » Nested JSON Arrays Query] | |||
* [https://stackoverflow.com/questions/24944347/ PostgreSQL » Nested JSON Querying] | |||
* [https://stackoverflow.com/questions/20272650/ PostgreSQL » Loop over JSON Arrays] | |||
* [https://stackoverflow.com/questions/1517635/ PostgreSQL » Export to a CSV File] | |||
* [https://www.postgresql.org/docs/14/queries-with.html PostgreSQL » WITH Queries] | |||
| valign="top" | | |||
* [https://stackoverflow.com/questions/50364904/ PostgreSQL » <code>WITH</code> clause with <code>INSERT</code> statement] | |||
* [https://hasura.io/blog/top-psql-commands-and-flags-you-need-to-know-postgresql PostgreSQL » PSQL Short Commands and Flags] | |||
* [https://stackoverflow.com/questions/65478350/ Column type is JSON but expression is Varchar] | |||
* [https://stackoverflow.com/questions/34708509/ PostgreSQL » <code>RETURNING</code> with <code>ON CONFLICT</code>] | |||
* [https://dba.stackexchange.com/questions/316681/ PostgreSQL 14 not Started on Ubuntu 22.04] | |||
* [[Dockerized PostgreSQL|PostgreSQL » Dockerized]] | |||
| valign="top" | | |||
|- | |||
| colspan="3" | | |||
---- | |||
|- | |||
| valign="top" | | |||
* [[Convention for Database Tables]] | |||
* [[Base36 Encoding and Decoding]] | |||
* [[EclipseLink]] | |||
* [[MapStruct]] | |||
* [[Hibernate]] | |||
* [[Liquibase]] | |||
* [[MyBatis]] | |||
* [[Lombok]] | |||
* [[Locale]] | |||
* [[JPA]] | |||
| valign="top" | | | valign="top" | | ||
* [https://stackoverflow.com/questions/533256/ PostgreSQL » Aggregate multiple rows as array] | |||
* [https://stackoverflow.com/questions/39620317 PostgreSQL » JSON column with H2] | |||
* [https://www.postgresql.org/docs/12/xml2.html PostgreSQL » 12 » XML2 Functions] | |||
* [https://www.postgresql.org/docs/12/functions-json.html PostgreSQL » 12 » JSON Functions] | |||
* [https://www.postgresql.org/docs/9.1/functions-xml.html PostgreSQL » 9.1 » XML Functions] | |||
* [https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html Java » ISO <code>DateTimeFormatter</code>] | |||
* [https://pgpedia.info/x/xmlelement.html PostgreSQL » XML <code>xmlelement</code>] | |||
* [https://pgpedia.info/categories/xml.html PostgreSQL » XML » Categories] | |||
* [[Base64]] | |||
* [[Jasypt]] | |||
| valign="top" | | | valign="top" | | ||
* [https://www.enterprisedb.com/blog/how-configure-oauth-20-pgadmin-4 PostgreSQL » PgAdmin » OAuth 2.0 » Configure] | |||
* [https://www.pgadmin.org/docs/pgadmin4/6.21/oauth2.html PostgreSQL » PgAdmin » OAuth 2.0 » Enabling] | |||
* [https://www.postgresql.org/docs/12/pgcrypto.html PostgreSQL » 12 » <code>pgcrypto</code>] | |||
* [[PostgreSQL/PgBouncer|PostgreSQL » PgBouncer]] | |||
* [[MySQL]] | |||
|} | |} |
Latest revision as of 02:41, 30 September 2024
Login to PSQL
export PGPASSWORD='p@55w0rd'
# [email protected]:~ #
psql -U postgres
#psql (10.15 (Ubuntu 10.15-0ubuntu0.18.04.1))
#Type "help" for help.
# [email protected]:~ #
su - postgres
# Last login: Tue Jan 01 12:00:00 MYT 2013 on pts/0
# -bash-4.2$
psql
# psql (9.6.8)
# Type "help" for help.
#
# postgres=#
Connection String
parameters based psql psql -U postgres psql -U academia postgres psql -U academia -h rds.vpc.chorke.org postgres psql -U academia -h rds.vpc.chorke.org -p 5432 postgres PGPASSWORD='sadaqah' psql -U academia -h rds.vpc.chorke.org -p 5432 postgres |
connection string based psql: psql 'postgres://postgres:@:/' psql 'postgres://academia:@:/postgres' psql 'postgres://academia:@rds.vpc.chorke.org:/postgres' psql 'postgres://academia:@rds.vpc.chorke.org:5432/postgres' psql 'postgres://academia:[email protected]:5432/postgres' |
Login Error
# [email protected]:~ #
su - postgres psql
# Last login: Tue Jan 01 12:00:00 MYT 2013 on pts/0
# /bin/createuser: /bin/createuser: cannot execute binary file
|
# [email protected]:~ #
su postgres
psql -p 5433
# psql (12.9 (Ubuntu 12.9-0ubuntu0.20.04.1))
|
# [email protected]:~ #
sudo su postgres
psql -p 5433
# psql (12.9 (Ubuntu 12.9-0ubuntu0.20.04.1))
|
Cluster Login
pg_lsclusters
sudo su - postgres
|
psql -p 5433
psql -U postgres -p 5433
|
GRANT ALL ON tmp_coll TO academia;
GRANT ALL ON test_json TO academia;
|
-- psql -V
SELECT version();
|
Creating User
# [email protected]:~ #
su - postgres
# Last login: Tue Jan 01 12:00:00 MYT 2013 on pts/0
# -bash-4.2$
createuser root
# createuser: creation of new role failed: ERROR: role "root" already exists
# ALTER USER root WITH SUPERUSER;
Creating DB
Until a root database has been created, login always shows the error: could not change directory to "/root": Permission denied
. To overcome this error, create a root database; its only purpose is to avoid this error when logging in as the root user.
# [email protected]:~ #
su - postgres
# Last login: Tue Jan 01 12:00:00 MYT 2013 on pts/0
# -bash-4.2$
createdb root
Similarly, you can create a database with the same name as your frequently used OS user. In that case no password is needed to log in; authentication is handled by the OS. That means you can log in to your PostgreSQL user using OS authentication.
Grant Privileges
# postgres=#
grant all privileges on database root to root;
# GRANT
# postgres=#
grant all privileges on database postgres to root;
# GRANT
Extensions
ENABLE_DB_LINK=$(cat <<-DDL
CREATE EXTENSION IF NOT EXISTS dblink;
CREATE EXTENSION IF NOT EXISTS dblink SCHEMA extensions;
SELECT * FROM pg_available_extensions WHERE name = 'dblink';
DDL
)
echo "${ENABLE_DB_LINK}" | psql -p 5432 -U ${USER}
echo "${ENABLE_DB_LINK}" | psql -p 5432 -U ${USER} academia_data_staging
|
ENABLE_PG_CRYPTO=$(cat <<-DDL
CREATE EXTENSION IF NOT EXISTS pgcrypto;
CREATE EXTENSION IF NOT EXISTS pgcrypto SCHEMA extensions;
SELECT * FROM pg_available_extensions WHERE name = 'pgcrypto';
DDL
)
echo "${ENABLE_PG_CRYPTO}" | psql -p 5432 -U ${USER}
echo "${ENABLE_PG_CRYPTO}" | psql -p 5432 -U ${USER} academia_data_staging
|
Logout PSQL
# postgres=#
\q
# -bash-4.2$
Meta Data
-- show databases
SELECT datname FROM pg_database; -- \l+
-- show sequence
SELECT c.relname FROM pg_class c -- \ds+
WHERE c.relkind = 'S' ORDER BY c.relname;
-- show tables
SELECT * FROM pg_catalog.pg_tables
WHERE schemaname NOT IN ('pg_catalog','information_schema');
-- describe tables
SELECT table_name, column_name, data_type -- \d+ my_table
FROM information_schema.columns WHERE table_name = 'my_table';
-- find tables with similar field name
SELECT table_name, column_name, data_type
FROM information_schema.columns WHERE column_name LIKE '%slug%';
|
SELECT pg_database_size('my_database'); -- \l+
SELECT pg_size_pretty(pg_database_size('my_database')); -- \l+
SELECT
db.datname AS db_name,
pg_size_pretty(pg_database_size(db.datname)) AS db_size
FROM pg_database db ORDER BY pg_database_size(db.datname) DESC;
SELECT pg_size_pretty(pg_total_relation_size('my_table')); -- \dt+
SELECT -- \dt+
table_schema, table_name,
pg_size_pretty(pg_total_relation_size(quote_ident(table_name))) AS table_size
FROM information_schema.tables
WHERE table_schema = 'public'
ORDER BY 2 ASC;
|
XML Query
SELECT
xml_is_well_formed('<>') AS "<>", -- false
xml_is_well_formed('<name >') AS "<name >", -- false
xml_is_well_formed('<name />') AS "<name />", -- true
xml_is_well_formed('academia') AS "academia", -- true
xml_is_well_formed('{academia') AS "{academia", -- true
xml_is_well_formed('{academia}') AS "{academia}"; -- true
|
SELECT
xml_is_well_formed_content('<>') AS "<>", -- false
xml_is_well_formed_content('<name >') AS "<name >", -- false
xml_is_well_formed_content('<name />') AS "<name />", -- true
xml_is_well_formed_content('academia') AS "academia", -- true
xml_is_well_formed_content('{academia') AS "{academia", -- true
xml_is_well_formed_content('{academia}') AS "{academia}"; -- true
|
| |
SELECT
xml_is_well_formed_document('<>') AS "<>", -- false
xml_is_well_formed_document('<name >') AS "<name >", -- false
xml_is_well_formed_document('<name />') AS "<name />", -- true
xml_is_well_formed_document('academia') AS "academia", -- false
xml_is_well_formed_document('{academia') AS "{academia", -- false
xml_is_well_formed_document('{academia}') AS "{academia}"; -- false
|
|
| |
-- Build a slash-separated XPath from a jsonb array of [sn, node] pairs,
-- where sn is the position of each node name in the path.
WITH xpath_node AS (
    SELECT
        cast(raw_xpath_node->>0 AS integer) "sn",
        cast(raw_xpath_node->>1 AS varchar) "node"
    FROM jsonb_array_elements('[
        [0, ""],
        [1, "bpmn:definitions"],
        [2, "bpmn:process"],
        [3, "bpmn:serviceTask"],
        [4, "bpmn:extensionElements"],
        [5, "camunda:connector"],
        [6, "camunda:inputOutput"],
        [7, "camunda:inputParameter"]
    ]'::jsonb) AS raw_xpath_node
)
-- ORDER BY sn inside the aggregate: without it the concatenation order is
-- unspecified. The empty node at sn = 0 produces the leading '/'.
SELECT string_agg(node, '/' ORDER BY sn) AS "path" FROM xpath_node;
|
-- Turn a jsonb array of [prefix, namespace] pairs into the two-dimensional
-- text array shape that PostgreSQL xpath() expects for its namespace argument.
WITH workflow_namespace AS (
SELECT
cast(raw_workflow_namespace->>0 AS varchar) "prefix",
cast(raw_workflow_namespace->>1 AS varchar) "namespace"
FROM jsonb_array_elements('[
["bpmn", "http://www.omg.org/spec/BPMN/20100524/MODEL"],
["bpmndi", "http://www.omg.org/spec/BPMN/20100524/DI"],
["dc", "http://www.omg.org/spec/DD/20100524/DC"],
["di", "http://www.omg.org/spec/DD/20100524/DI"],
["modeler", "http://camunda.org/schema/modeler/1.0"],
["camunda", "http://camunda.org/schema/1.0/bpmn"]
]'::jsonb) AS raw_workflow_namespace
)
-- SELECT * FROM workflow_namespace;
-- SELECT array[prefix, namespace] FROM workflow_namespace;
-- NOTE(review): array_agg has no ORDER BY, so element order is not
-- guaranteed — harmless for xpath namespaces, but confirm if order matters.
SELECT array_agg(array[prefix, namespace]) FROM workflow_namespace;
| |
WITH xpath_ns AS (
SELECT
cast(raw_xpath_ns->>0 AS varchar) "alias",
cast(raw_xpath_ns->>1 AS varchar) "namespace"
FROM jsonb_array_elements('[
["bpmn", "http://www.omg.org/spec/BPMN/20100524/MODEL"],
["bpmndi", "http://www.omg.org/spec/BPMN/20100524/DI"],
["dc", "http://www.omg.org/spec/DD/20100524/DC"],
["di", "http://www.omg.org/spec/DD/20100524/DI"],
["modeler", "http://camunda.org/schema/modeler/1.0"],
["camunda", "http://camunda.org/schema/1.0/bpmn"]
]'::jsonb) AS raw_xpath_ns
),
xpath_nss AS (
SELECT
array_agg(array[ns.alias, ns.namespace])
FROM xpath_ns ns
),
stored_dataflow AS (
SELECT
id_ AS "id", rev_ AS "revision",
generated_ AS "is_generated", tenant_id_ AS "tenant_id",
type_ AS "type", create_time_ AS "created_on", root_proc_inst_id_ AS "root_proc_inst_id",
removal_time_ AS "deleted_on", name_ AS "name", convert_from(bytes_, 'UTF8') AS "dataflow"
FROM act_ge_bytearray
),
stored_workflow AS (
SELECT
sd.id, sd.name, sd.type, sd.revision, sd.is_generated,
sd.created_on, sd.deleted_on, sd.root_proc_inst_id,
CAST(sd.dataflow AS xml) AS "workflow", sd.dataflow,
(SELECT * FROM xpath_nss) AS "ns"
FROM stored_dataflow sd
WHERE xml_is_well_formed_document(sd.dataflow)
ORDER BY sd.name ASC, sd.created_on DESC
)
SELECT
sw.name,
(xpath('count(/bpmn:definitions/bpmn:process)', sw.workflow, sw.ns))[1]::text::int,
(xpath('/bpmn:definitions/bpmn:process', sw.workflow, sw.ns))[1], dataflow
FROM stored_workflow sw;
|
Array Query
SELECT dependencies||'"Running Fiscal Year"'
FROM scope WHERE name = 'Fiscal Year Config';
SELECT * FROM user
WHERE owner_ref LIKE ANY (ARRAY[
',academia,chorke.org,shahed.biz,'
]) ORDER BY id ASC;
SELECT * FROM user
WHERE owner_ref LIKE ALL (ARRAY[
',academia,chorke.org,shahed.biz,'
]) ORDER BY id ASC;
JSON Query
{
"type": "Reseller",
"commission": {
"type": "Percentage",
"value": "25"
}
}
SELECT extended_properties ->'commission'->'value'
FROM seller WHERE extended_properties ->>'type' = 'Reseller';
--
UPDATE seller
SET extended_properties = jsonb_set(extended_properties, '{type}', '"Retailer"')
WHERE id = 1000;
--
UPDATE seller
SET extended_properties = extended_properties || jsonb_build_object('code', code)
WHERE id = 1000;
--
UPDATE seller
SET extended_properties = jsonb_set(extended_properties, '{commission, type}', '"Bonus"')
WHERE id = 1000 AND extended_properties ->> 'type' = 'Reseller';
JSON SELECT id, name FROM json_to_record (
'{"id":1,"name":"Academia"}'::json
) AS concern(id int, name text);
SELECT * FROM json_array_elements_text('[
{"id":1,"name":"Academia"},
{"id":2,"name":"Agronomy"},
{"id":3,"name":"Software"}
]'::json);
SELECT id, name FROM json_array_elements_text('[
{"id": 1, "name": "Academia"},
{"id": 2, "name": "Agronomy"},
{"id": 3, "name": "Software"}
]'::json) AS concerns_text,
json_to_record(concerns_text::json)
AS concerns_json(id int, name text);
SELECT id, name FROM json_array_elements('[
{"id": 1, "name": "Academia"},
{"id": 2, "name": "Agronomy"},
{"id": 3, "name": "Software"}
]'::json) AS concerns_array,
json_to_record(concerns_array)
AS concerns_rows(id int, name text);
SELECT id, name FROM json_to_recordset('{"data": [
{"concerns": {"id": 1, "name": "Academia"}},
{"concerns": {"id": 2, "name": "Agronomy"}},
{"concerns": {"id": 3, "name": "Software"}}
]}'::json -> 'data') as r(concerns json),
json_to_record(concerns) AS concern(id int, name text);
|
JSONB SELECT id, name FROM jsonb_to_record (
'{"id":1,"name":"Academia"}'::jsonb
) AS concern(id int, name text);
SELECT * FROM jsonb_array_elements_text('[
{"id":1,"name":"Academia"},
{"id":2,"name":"Agronomy"},
{"id":3,"name":"Software"}
]'::jsonb);
SELECT id, name FROM jsonb_array_elements_text('[
{"id": 1, "name": "Academia"},
{"id": 2, "name": "Agronomy"},
{"id": 3, "name": "Software"}
]'::jsonb) AS concerns_text,
jsonb_to_record(concerns_text::jsonb)
AS concerns_json(id int, name text);
SELECT id, name FROM jsonb_array_elements('[
{"id": 1, "name": "Academia"},
{"id": 2, "name": "Agronomy"},
{"id": 3, "name": "Software"}
]'::jsonb) AS concerns_array,
jsonb_to_record(concerns_array)
AS concerns_rows(id int, name text);
SELECT id, name FROM jsonb_to_recordset('{"data": [
{"concerns": {"id": 1, "name": "Academia"}},
{"concerns": {"id": 2, "name": "Agronomy"}},
{"concerns": {"id": 3, "name": "Software"}}
]}'::jsonb -> 'data') as r(concerns jsonb),
jsonb_to_record(concerns) AS concern(id int, name text);
|
Result id | name ----+---------- 1 | Academia value ------------------------------- {"id": 1, "name": "Academia"} {"id": 2, "name": "Agronomy"} {"id": 3, "name": "Software"} id | name ----+---------- 1 | Academia 2 | Agronomy 3 | Software (3 rows) id | name ----+---------- 1 | Academia 2 | Agronomy 3 | Software (3 rows) id | name ----+---------- 1 | Academia 2 | Agronomy 3 | Software (3 rows) |
Insert
-- query » group : religion
-- -----------------------------------------------------------------------------
-- SELECT MAX(id) FROM grouped_collection;
-- SELECT * FROM grouped_collection WHERE group_name = 'religion';
-- DELETE FROM grouped_collection WHERE group_name = 'religion';
-- -----------------------------------------------------------------------------
WITH mixed_grouped_collection AS (
SELECT
cast('chorke.org' AS varchar) "domain",
cast('religion' AS varchar) "group_name",
cast(raw_grouped_collection->>0 AS varchar) "code",
cast(raw_grouped_collection->>1 AS varchar) "name",
cast(raw_grouped_collection->>2 AS jsonb ) "extended_properties",
cast(raw_grouped_collection->>3 AS numeric) "order_coll"
FROM jsonb_array_elements('[
["I", "Islam", {}, 1],
["B", "Buddhist", {}, 2],
["H", "Hindu", {}, 3],
["C", "Christian", {}, 4],
["O", "Others", {}, 5]
]'::jsonb) AS raw_grouped_collection
)
, new_grouped_collection AS (
SELECT
ROW_NUMBER() OVER() "id", LOCALTIMESTAMP "created_on",
mgc.domain, mgc.code, mgc.name, mgc.group_name,
mgc.extended_properties, mgc.order_coll
FROM mixed_grouped_collection mgc
-- prevent old duplicate
WHERE NOT EXISTS (
SELECT id FROM grouped_collection
WHERE (code, group_name, domain) = (mgc.code, mgc.group_name, mgc.domain)
)
)
SELECT ngc.* FROM new_grouped_collection ngc;
-- INSERT INTO grouped_collection (id, created_on, domain, code, name, group_name, extended_properties, order_coll)
-- SELECT (SELECT (COALESCE(MAX(id), 0) + ngc.id) FROM grouped_collection),
-- ngc.created_on, ngc.domain, ngc.code, ngc.name, ngc.group_name,
-- ngc.extended_properties, ngc.order_coll
-- FROM new_grouped_collection ngc
-- -- prevent new duplicate
-- WHERE NOT EXISTS (
-- SELECT id FROM grouped_collection
-- WHERE (code, group_name, domain) = (ngc.code, ngc.group_name, ngc.domain)
-- )
-- ORDER BY (ngc.order_coll, ngc.id) ASC;
Delete Join
-- DELETE ... USING is a PostgreSQL extension (not ANSI SQL) and is not
-- supported by other RDBMSs; the portable form is a subquery, which
-- deletes exactly the same rows here.
DELETE FROM product
WHERE category_id IN (
    SELECT id FROM category WHERE name = 'Grocery'
);
-- PostgreSQL-only equivalent, kept for reference:
-- DELETE FROM product p USING category c
-- WHERE p.category_id = c.id AND c.name = 'Grocery';
Sequence
-- Per-agent policy sequence; the numeric range maps to base36
-- policy numbers 1,000 (46656) through Z,ZZZ (1679615).
CREATE SEQUENCE IF NOT EXISTS public.policy_1000
INCREMENT 1
START 46656 -- 1,000
MINVALUE 46656 -- 1,000
MAXVALUE 1679615 -- Z,ZZZ
CACHE 10;
ALTER SEQUENCE public.policy_1000 OWNER TO chorke_init_pro;
COMMENT ON SEQUENCE public.policy_1000 IS 'Academia Policy Sequence';
-- Inspection / reset helpers:
SELECT CURRVAL('policy_1000');
-- NOTE(review): SETVAL 0 and RESTART 1 are below MINVALUE (46656);
-- NEXTVAL will then error until the value is raised — confirm intent.
SELECT SETVAL('policy_1000', 0);
ALTER SEQUENCE IF EXISTS policy_1000 RESTART 1;
SELECT NEXTVAL('policy_1000')::int AS policy_id;
-- Grant the application role use of existing and future sequences.
GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO chorke_init_pro;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO chorke_init_pro;
Echo/Print
-- Print/debug from an anonymous PL/pgSQL block: concatenate two jsonb
-- arrays with || and emit the result through RAISE NOTICE.
DO $BODY$
DECLARE
p_jsonb jsonb;
BEGIN
-- SELECT INTO is an equivalent assignment form, kept for reference:
-- SELECT INTO p_jsonb '["", "template","{templateType}","{productSlug}"]'::jsonb || '["{filePath}"]'::jsonb;
p_jsonb := '["", "template","{templateType}","{productSlug}"]'::jsonb || '["{filePath}"]'::jsonb;
RAISE NOTICE '%', p_jsonb;
END;
$BODY$;
Execute DDL
Insert Using Loop
-- Seed five rows into grouped_collection from a loop; ids continue
-- from the current MAX(id).
DO $BODY$
DECLARE
p_id integer;
p_domain varchar:= 'chorke.org';
p_group_name varchar:= 'occupation_class';
BEGIN
-- NOTE(review): MAX(id)+n is not concurrency-safe — two sessions running
-- this together can collide; a sequence would avoid that.
SELECT INTO p_id (COALESCE(MAX(id), 0)) FROM grouped_collection;
FOR p_code IN 1..5 LOOP
-- both code and name receive the loop counter; extended_properties empty
INSERT INTO grouped_collection (id, created_on, domain, code, name, group_name, extended_properties)
VALUES ((p_id + p_code), LOCALTIMESTAMP, p_domain, p_code, p_code, p_group_name, '{}');
END LOOP;
END;
$BODY$;
Anonymous Blocks
-- Drop a 'uk_'-prefixed constraint from public.fiscal_year, if one exists.
-- NOTE(review): if several constraints match, SELECT INTO keeps an arbitrary
-- one — confirm at most one 'uk_' constraint is expected on this table.
DO
$BODY$
DECLARE
    fiscal_year_ukc character varying;
    fiscal_drop_ukc character varying := 'ALTER TABLE fiscal_year';
BEGIN
    SELECT
    INTO fiscal_year_ukc con.conname
    FROM pg_catalog.pg_constraint con
    INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
    INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = con.connamespace
    WHERE
        nsp.nspname = 'public' AND
        rel.relname = 'fiscal_year' AND
        con.conname LIKE 'uk\_%'; -- fixed: bare '_' is a single-char wildcard in LIKE
    IF fiscal_year_ukc IS NOT NULL THEN
        -- adjacent string literals separated by a newline concatenate
        fiscal_drop_ukc := fiscal_drop_ukc || ' '
            'DROP CONSTRAINT ' || fiscal_year_ukc;
        EXECUTE fiscal_drop_ukc;
    END IF;
END
$BODY$;
Create Sequence
-- Returns the next policy number from a per-agent sequence, creating the
-- sequence ('policy_' || agent_no) on first use.
-- Range: 46656 ('1000' base36) .. 1679615 ('ZZZZ' base36); CACHE 10 means a
-- session pre-allocates 10 values (gaps appear if it disconnects early).
-- NOTE(review): agent_no is interpolated into DDL unescaped — safe only for
-- trusted, identifier-friendly values; consider format('%I', ...)/quote_ident.
CREATE OR REPLACE FUNCTION fn_get_policy_no(IN agent_no character varying)
RETURNS integer
LANGUAGE 'plpgsql'
AS
$BODY$
DECLARE
policy_no integer;
sequence_name character varying := 'policy_' || agent_no;
create_sequence character varying := 'CREATE SEQUENCE IF NOT EXISTS ' || sequence_name;
BEGIN
-- adjacent string literals separated by newlines concatenate into one DDL string
create_sequence := create_sequence || ' '
'INCREMENT 1 '
'START 46656 ' -- 1,000
'MINVALUE 46656 ' -- 1,000
'MAXVALUE 1679615 ' -- Z,ZZZ
'CACHE 10 ';
EXECUTE create_sequence;
SELECT INTO policy_no NEXTVAL(sequence_name)::int;
RETURN policy_no;
END
$BODY$;
SELECT fn_get_policy_no('0001');
Create Sequence with Comment
-- Returns the next policy number from a per-agent sequence, creating the
-- sequence and attaching agent_desc as its COMMENT on first use.
-- Range matches the single-argument overload: 46656..1679615 (base36).
-- Fix: agent_desc is now escaped with quote_literal(); the previous manual
-- quoting produced broken (and injectable) SQL whenever agent_desc
-- contained a single quote.
-- NOTE(review): agent_no still goes into the identifier unescaped — safe
-- only for trusted, identifier-friendly values; consider format('%I', ...).
CREATE OR REPLACE FUNCTION fn_get_policy_no(IN agent_no character varying, IN agent_desc character varying)
RETURNS integer
LANGUAGE 'plpgsql'
AS
$BODY$
DECLARE
policy_no integer;
sequence_name character varying := 'policy_' || agent_no;
create_sequence character varying := 'CREATE SEQUENCE IF NOT EXISTS ' || sequence_name;
comment_sequence character varying := 'COMMENT ON SEQUENCE ' || sequence_name || ' IS ' || quote_literal(agent_desc);
BEGIN
-- adjacent string literals separated by newlines concatenate into one DDL string
create_sequence := create_sequence || ' '
'INCREMENT 1 '
'START 46656 ' -- 1,000
'MINVALUE 46656 ' -- 1,000
'MAXVALUE 1679615 ' -- Z,ZZZ
'CACHE 10 ';
EXECUTE create_sequence;
EXECUTE comment_sequence;
SELECT INTO policy_no NEXTVAL(sequence_name)::int;
RETURN policy_no;
END
$BODY$;
SELECT fn_get_policy_no('0001', 'Fareast Islami Life Insurance');
-- @Query(value="SELECT fn_get_policy_no(:agent_no, :agent_desc)", nativeQuery = true)
-- Integer getPolicyNo(@Param("agent_no") String agentNo, @Param("agent_desc") String agentDesc);
Create From Table Exclude Column
-- Clone the invoice table (data included) while excluding the id and
-- created_on columns.
DO
$BODY$
DECLARE
    create_table character varying;
BEGIN
    -- fixed: ORDER BY now lives inside the aggregate — the order of rows
    -- flowing from a subquery into STRING_AGG is not guaranteed to survive
    SELECT INTO create_table
        'CREATE TABLE IF NOT EXISTS invoice__temp AS SELECT ' ||
        STRING_AGG(isc.column_name, ', ' ORDER BY isc.ordinal_position) ||
        ' FROM invoice'
    FROM information_schema.columns isc
    WHERE isc.table_name = 'invoice' AND
          isc.column_name NOT IN ('id', 'created_on');
    -- NOTE(review): create_table is NULL (and EXECUTE fails) when the
    -- source table does not exist — intentional fail-fast, presumably.
    EXECUTE create_table;
END
$BODY$;
Date & Time
-- NOTE: DATE_TRUNC('day', created_on) = ... wraps the column in a function
-- and is not index-friendly; a half-open range on created_on is the
-- sargable form.
SELECT * FROM policy
WHERE DATE_TRUNC('day', created_on) = TO_DATE('2021-12-31', 'yyyy-mm-dd');
UPDATE policy SET deleted_on = LOCALTIMESTAMP
WHERE DATE_TRUNC('day', created_on) = TO_DATE('2021-12-31', 'yyyy-mm-dd');
-- date/time part extraction and truncation
SELECT EXTRACT(CENTURY FROM TIMESTAMP '2013-10-10 10:10:10');
SELECT DATE_TRUNC('year', TIMESTAMP '2013-10-10 10:10:10');
SELECT DATE_TRUNC('hour', TIMESTAMP '2013-10-10 10:10:10');
SELECT AGE(TIMESTAMP '2023-10-10', TIMESTAMP '1983-10-10');
SELECT EXTRACT(DAY FROM TIMESTAMP '2013-10-10 10:10:10');
SELECT DATE_PART('day', TIMESTAMP '2013-10-10 10:10:10');
SELECT DATE_PART('hour', INTERVAL '4 hours 3 minutes');
-- interval normalization helpers
SELECT JUSTIFY_INTERVAL(INTERVAL '1 mon -1 hour');
SELECT JUSTIFY_HOURS(INTERVAL '27 hours');
SELECT JUSTIFY_DAYS(INTERVAL '35 days');
SELECT AGE(TIMESTAMP '1983-10-10');
-- current date/time (the (2) limits fractional-second precision)
SELECT CURRENT_TIMESTAMP(2); -- fixed: missing statement terminator
SELECT CURRENT_TIMESTAMP;
SELECT LOCALTIMESTAMP;
SELECT CURRENT_TIME;
SELECT CURRENT_DATE;
String Format
-- split a path into a text[] / json array of segments
-- (leading '/' yields an empty first element)
SELECT STRING_TO_ARRAY('/my/request/path', '/') "string_array";
SELECT ARRAY_TO_JSON(STRING_TO_ARRAY('/my/request/path', '/')) "json_array";
Number Format
-- strip every non-digit (\D, flag 'g' = all occurrences) and cast to numeric;
-- NULLIF guards the all-letters case where the strip yields '' (not castable)
SELECT NULLIF(regexp_replace('CKI00109' , '\D','','g'), '')::numeric;
SELECT NULLIF(regexp_replace('CKI0010#9####', '\D','','g'), '')::numeric;
SELECT NULLIF(regexp_replace('CKI2203000075', '\D','','g'), '')::numeric;
Restoring the Dump
export dumps:
 pg_dump academia > academia_20200210_1020.sql
 pg_dump academia | gzip > academia_20200210_1020.sql.gz
-- kick out every session connected to the listed database(s) so that the
-- following DROP DATABASE cannot fail with "database is being accessed"
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity WHERE datname IN
(
'academia'
);
DROP DATABASE IF EXISTS academia;
CREATE DATABASE academia;
restore dumps:
 psql academia < academia_20200210_1020.sql
 gunzip -c academia_20200210_1020.sql.gz | psql academia
export table as csv:
 psql -U postgres
 \c my_database_staging
 \COPY my_table_name TO '/home/academia/Downloads/my_table_name.csv' DELIMITER ',' CSV HEADER;
OS User Auth
-- OS (ident) authentication: map the OS user onto a same-named DB role so
-- no password is needed for local connections
CREATE DATABASE academia;
CREATE USER academia WITH LOGIN;
GRANT ALL PRIVILEGES ON DATABASE academia TO academia;
-- CREATE USER academia WITH ENCRYPTED PASSWORD 'sadaqah!';
echo "os-map academia academia" >> $PG_DATA/pg_ident.conf
echo "host all all 127.0.0.1/32 ident map=os-map" >> $PG_DATA/pg_hba.conf
pg_ctl --pgdata="$PG_DATA" reload;
DBLink POC
-- install the dblink extension — choose ONE of the two forms
-- (default schema, or a dedicated 'extensions' schema)
CREATE EXTENSION dblink;
CREATE EXTENSION dblink SCHEMA extensions;
-- catalog of extensions installable on this server
SELECT * FROM pg_available_extensions;
SELECT * FROM pg_available_extensions WHERE name = 'dblink';
-- using a pl/pgsql block: loopback dblink — connect back to this very
-- server and read pg_available_extensions over the link
DO
$BODY$
DECLARE
p_db_host varchar:= host(inet_server_addr());
p_db_port varchar:= inet_server_port();
p_db_user varchar:= CURRENT_USER;
p_db_pass varchar:= 'p@$$w0rd';
p_db_name varchar:= 'postgres';
p_db_extn_name varchar;
p_db_exec_query varchar;
p_db_conn_format varchar:= 'hostaddr=%s port=%s dbname=%s user=%s password=%s';
p_db_conn_string varchar:= format(p_db_conn_format, p_db_host, p_db_port, p_db_name, p_db_user, p_db_pass);
BEGIN
p_db_exec_query := 'SELECT name FROM pg_available_extensions WHERE name = ''dblink''';
-- dblink returns anonymous records: the AS alias declares the row shape
SELECT INTO p_db_extn_name name FROM dblink(p_db_conn_string, p_db_exec_query) AS extensions(name varchar);
RAISE NOTICE 'Extension Name: %', p_db_extn_name;
END
$BODY$;
-- using a plain SQL query (no pl/pgsql): same loopback dblink lookup
WITH user_info AS (
    -- connection ingredients for a loopback dblink
    SELECT
        'hostaddr=%s port=%s dbname=%s user=%s password=%s' "db_conn_format",
        host(inet_server_addr()) "db_host",
        inet_server_port() "db_port",
        CURRENT_USER "db_user",
        'p@$$w0rd' "db_pass",
        'postgres' "db_name"
)
, conn_info AS (
    -- assembled connection string plus the remote query to run
    SELECT
        db_conn_format,
        format(db_conn_format, db_host, db_port, db_name, db_user, db_pass) "db_conn_string",
        'SELECT name FROM pg_available_extensions WHERE name = ''dblink''' "db_exec_query"
    FROM user_info
)
SELECT * FROM dblink(
    (SELECT db_conn_string FROM conn_info),
    (SELECT db_exec_query FROM conn_info)
) AS extensions(name varchar); -- fixed: missing statement terminator
DBLink Exec
-- Run a remote UPDATE over dblink; PERFORM discards the result status.
-- NOTE(review): values are spliced in via format('%s') inside hand-written
-- quotes — fine for the trusted literals here, but %L is the safe form.
DO
$BODY$
DECLARE
p_connstr text := 'hostaddr=127.0.0.1 port=5432 dbname=academia';
BEGIN
PERFORM dblink_exec(p_connstr, format('UPDATE commission SET type = ''%s'' WHERE type = ''%s''', 'new_type', 'old_type'));
END
$BODY$;
-- Dry-run provisioning block: would create database/user and grant via
-- dblink (CREATE DATABASE cannot run inside a transaction/DO block, hence
-- the dblink detour); the destructive calls are commented out and only
-- RAISE NOTICE shows what would happen.
-- NOTE(review): the commented 'PERFORM format(...)' lines would only build
-- the string, not execute it — they need dblink_exec(p_dblink, format(...))
-- like the first one; confirm before re-enabling.
DO
$BODY$
DECLARE
p_schema varchar := 'academia';
p_passwd varchar := 'p@$$w0rd';
p_dbname varchar := 'academia_audit_staging';
p_dblink varchar := 'hostaddr=127.0.0.1 port=5432 dbname=postgres';
BEGIN
-- rebuild the connection string against the current server/database
SELECT INTO p_dblink
'hostaddr='|| host(inet_server_addr()) ||
' port=' || inet_server_port() || ' dbname=' || current_database();
RAISE NOTICE '%', p_dblink;
IF NOT EXISTS (SELECT FROM pg_catalog.pg_database WHERE datname = p_dbname) THEN
-- PERFORM dblink_exec(p_dblink, format('CREATE DATABASE %s', p_dbname));
RAISE NOTICE 'CREATE DATABASE %', p_dbname;
END IF;
IF NOT EXISTS (SELECT FROM pg_catalog.pg_user WHERE usename = p_schema) THEN
-- PERFORM format('CREATE USER %s WITH ENCRYPTED PASSWORD ''%s''', p_schema, p_passwd);
RAISE NOTICE 'CREATE USER % WITH ENCRYPTED PASSWORD ''%''', p_schema, p_passwd;
END IF;
-- PERFORM format('GRANT ALL PRIVILEGES ON DATABASE %s TO %s', p_dbname, p_schema);
RAISE NOTICE 'GRANT ALL PRIVILEGES ON DATABASE % TO %', p_dbname, p_schema;
END
$BODY$;
Recursive Query
-- Walk the org_unit tree upwards: start from the unit whose code is the
-- slug of 'Chorke Agency', then repeatedly join each row's parent until
-- the root (parent IS NULL stops the recursion — no new rows join).
WITH RECURSIVE hierarchy AS (
SELECT
o.id,
o.code, o.name,
o.parent "parent_code",
p.name "parent_name"
FROM org_unit o
LEFT JOIN org_unit p ON p.code = o.parent
WHERE o.code = LOWER(REPLACE('Chorke Agency', ' ', '-'))
UNION
SELECT
o.id,
o.code, o.name,
o.parent "parent_code",
p.name "parent_name"
FROM org_unit o
LEFT JOIN org_unit p ON p.code = o.parent
-- recursive step: the previous row's parent becomes the next row
INNER JOIN hierarchy h ON h.parent_code = o.code
)
SELECT * FROM hierarchy;
Encode/Decode
-- text <-> bytea and base64 round-trips ('escape' treats text as raw bytes,
-- convert_to/convert_from apply a real character encoding)
SELECT decode('Chorke Academia, Inc.', 'escape'); -- text to binary
SELECT encode('Chorke Academia, Inc.'::bytea, 'escape'); -- binary to text
SELECT convert_to('Chorke Academia, Inc.', 'UTF8'); -- text to binary
SELECT convert_from('Chorke Academia, Inc.'::bytea, 'UTF8'); -- binary to text
SELECT encode('Chorke Academia, Inc.'::bytea, 'base64'); -- text to base64
SELECT convert_from(decode('Q2hvcmtlIEFjYWRlbWlhLCBJbmMu', 'base64'), 'UTF8'); -- base64 to text
Base36 Encode
-- Encode a base-10 bigint as a base36 string (digits 0-9A-Z).
-- The sign is discarded (abs); 0 encodes as '0'.
CREATE OR REPLACE FUNCTION fn_base36_encode(IN base10 bigint)
RETURNS varchar
LANGUAGE plpgsql IMMUTABLE
AS
$BODY$
DECLARE
    alphabet  constant varchar := '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ';
    remaining bigint  := abs(base10);
    encoded   varchar := '';
BEGIN
    -- peel off the least-significant base36 digit and prepend it
    WHILE remaining <> 0 LOOP
        encoded   := substr(alphabet, (remaining % 36)::int + 1, 1) || encoded;
        remaining := remaining / 36;
    END LOOP;
    RETURN CASE WHEN base10 = 0 THEN '0' ELSE encoded END;
END
$BODY$;
Base36 Decode
-- Decode a base36 string (case-insensitive) to its base-10 bigint value.
-- Fix: the original computed intval * (36 ^ rindex); '^' yields double
-- precision in PostgreSQL, so results lost exactness beyond 2^53. The
-- left-to-right accumulator below stays in pure bigint arithmetic.
-- Fix: invalid characters (position() = 0) silently produced garbage;
-- they now raise an exception.
CREATE OR REPLACE FUNCTION fn_base36_decode(IN base36 varchar)
RETURNS bigint
LANGUAGE plpgsql
AS
$BODY$
DECLARE
    cindex int;
    digit  int;
    base10 bigint  := 0;
    char0z varchar := '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ';
BEGIN
    FOR cindex IN 1..char_length(base36) LOOP
        digit := position(substring(upper(base36) FROM cindex FOR 1) IN char0z) - 1;
        IF digit < 0 THEN
            RAISE EXCEPTION 'invalid base36 digit: %', substring(base36 FROM cindex FOR 1);
        END IF;
        base10 := base10 * 36 + digit;
    END LOOP;
    RETURN base10;
END
$BODY$;
Large Object
-- psql: list the server-side large-object helper functions
\dfS lo_*
-- lo_open(lobjId oid, mode integer) returns integer
-- The mode parameter to lo_open uses two constants:
-- INV_READ = 0x20000
-- INV_WRITE = 0x40000
-- INV_READ_WRITE = 0x60000
SELECT x'20000'::int; -- SELECT cast(x'20000' AS integer); -- 131072
SELECT x'40000'::int; -- SELECT cast(x'40000' AS integer); -- 262144
SELECT x'60000'::int; -- SELECT cast(x'60000' AS integer); -- 393216
SELECT (x'20000'|x'40000')::int; -- fixed: missing ';' -- SELECT cast(x'20000'|x'40000' AS integer); -- 393216
-- example: a
SELECT lo_get(photo::oid), lo_get(resume::oid) FROM employee; -- select large object as binary data
SELECT lo_from_bytea(0, 'Chorke Academia, Inc.'::bytea) FROM employee; -- create large object from ascii text
SELECT lo_from_bytea(0, convert_to('Chorke Academia, Inc.', 'UTF8')) FROM employee; -- create large object from utf-8 text
-- example: b
SELECT resume FROM profile; -- oid
SELECT lo_get(resume::oid) FROM profile; -- binary
SELECT encode(lo_get(resume::oid), 'escape') FROM profile; -- binary to text
SELECT encode(lo_get(resume::oid), 'base64') FROM profile; -- binary to base64
-- example: c
SELECT convert_from(lo_get(resume::oid), 'UTF8') FROM profile;
SELECT convert_from(loread(lo_open(resume::oid, x'40000'::int), x'40000'::int), 'UTF8') FROM profile;
SELECT lowrite(lo_open(lo_creat(-1), x'60000'::int), convert_to('Chorke Academia, Inc.', 'UTF8')) FROM profile; -- create large object
SELECT lowrite(lo_open(resume::oid, x'60000'::int), convert_to('Chorke Academia, Inc.', 'UTF8')) FROM profile; -- update large object
Insert
-- insert a row together with a freshly created large object (resume oid)
-- NOTE(review): COALESCE(MAX(id),0)+1 is race-prone under concurrent
-- inserts — a sequence/identity column is the safe id source.
INSERT INTO employee(id, resume, name) VALUES(
(SELECT COALESCE(max(id), 0) + 1 FROM employee),
lo_from_bytea(0, convert_to('Chorke Academia, Inc.', 'UTF8')),
'Chorke Academia, Inc.'
);
Select
-- read the large-object column three ways: raw oid, dereferenced bytes,
-- and the bytes decoded as UTF-8 text
SELECT
    e.id,
    e.name,
    e.resume AS resume_oid,
    lo_get(e.resume::oid) AS resume_binary,
    convert_from(lo_get(e.resume::oid), 'UTF8') AS resume_text
FROM employee AS e;
Update
-- creates a brand-NEW large object on every execution: correct when the
-- row has none yet (create-on-update), WRONG as a repeated update —
-- each run orphans the previous oid
-- SELECT oid FROM pg_largeobject_metadata;
UPDATE employee SET
resume = lo_from_bytea(0, convert_to('Chorke Academia, Inc.', 'UTF8'))
WHERE id = 1;
-- SELECT oid FROM pg_largeobject_metadata;
-- each record should keep exactly 1 oid, but every UPDATE above creates a
-- new large object, so N updates leave N orphaned oids behind. Those
-- orphans must be unlinked from the database or they waste significant
-- disk space; doing that cleanup manually is error-prone and risky
-- SELECT lo_unlink(orphan_lo::oid);
-- right way: reuse the row's existing large object when there is one,
-- create a new object only when the row has none
-- SELECT oid FROM pg_largeobject_metadata;
DO
$BODY$
DECLARE
p_id integer:= 1;
p_oid integer:= 0;
p_resume varchar:= 'Chorke Academia, Inc.';
BEGIN
-- fetch the row's current large-object oid (0 when absent)
SELECT INTO p_oid COALESCE(m.oid, 0) FROM employee e
LEFT JOIN pg_largeobject_metadata m ON e.resume::oid = m.oid WHERE e.id = p_id;
IF p_oid > 0 THEN
-- overwrite the existing object in place
-- NOTE(review): lowrite overwrites from offset 0 only — if the new payload
-- is shorter than the old, stale tail bytes remain; lo_truncate would fix.
PERFORM lowrite(lo_open(p_oid::oid, x'60000'::int), convert_to(p_resume, 'UTF8'));
ELSE
UPDATE employee SET
resume = lo_from_bytea(0, convert_to(p_resume, 'UTF8'))
WHERE id = p_id;
END IF;
END
$BODY$;
-- SELECT oid FROM pg_largeobject_metadata;
-- SELECT * FROM employee
Delete
-- WRONG way: deletes the row but leaves its large object orphaned
-- SELECT oid FROM pg_largeobject_metadata;
DELETE FROM employee WHERE id = 1;
-- SELECT oid FROM pg_largeobject_metadata;
-- the oid's of deleted records should be unlinked as well; otherwise the
-- orphaned large objects waste significant disk space. Cleaning them up
-- manually afterwards is error-prone and risky
-- SELECT lo_unlink(orphan_lo::oid);
-- right way: capture the row's large-object oid first, delete the row,
-- then unlink the object so no orphan remains
-- SELECT oid FROM pg_largeobject_metadata;
DO
$BODY$
DECLARE
p_id integer:= 1;
p_oid integer:= 0;
p_resume varchar:= 'Chorke Academia, Inc.';
BEGIN
-- fetch the row's large-object oid (0 when absent) before deleting it
SELECT INTO p_oid COALESCE(m.oid, 0) FROM employee e
LEFT JOIN pg_largeobject_metadata m ON e.resume::oid = m.oid WHERE e.id = p_id;
DELETE FROM employee WHERE id = p_id;
IF p_oid > 0 THEN
PERFORM lo_unlink(p_oid::oid);
END IF;
END
$BODY$;
-- SELECT oid FROM pg_largeobject_metadata;
-- SELECT * FROM employee;
Alter
-- migrate a large-object (oid) column to inline storage in place
-- NOTE(review): the cast does not unlink the underlying large objects —
-- orphans remain in pg_largeobject until lo_unlink/vacuumlo; verify cleanup.
ALTER TABLE employee
-- ALTER COLUMN photo TYPE bytea USING lo_get(photo::oid);
ALTER COLUMN photo TYPE text USING encode(lo_get(photo::oid), 'base64');
Bastion Dump
# Dump a Postgres database reachable only via two ssh hops (bastion -> app
# host). The '/* $STDOUT$' ... '$STDOUT$ */' marker pairs wrap each hop's
# banner output so it can be stripped from the captured dump afterwards.
# (quoted <<'EOF' delimiters: nothing expands locally; %s are printf slots)
BASH_REMOTE_SCRIPT_FRMT=$(cat <<'EOF'
# suppress stdout
echo '/* $STDOUT$'
echo "${HOSTNAME}${HOME}"
ssh -qt gtw.vpc.chorke.org <<'EOF_00'
echo "${HOSTNAME}${HOME}"
ssh -qt app.vpc.chorke.org <<'EOF_01'
echo "${HOSTNAME}${HOME}"
echo '$STDOUT$ */'
pg_dump postgres://academia:%[email protected]:5432/%s
# suppress stdout
echo '/* $STDOUT$'
EOF_01
EOF_00
echo '$STDOUT$ */'
EOF
)
BASH_REMOTE_SCRIPT_PASS='sadaqah!'
BASH_REMOTE_SCRIPT_PGDB='academia_keycloak_staging'
# substitute password and db name into the template's %s placeholders
printf -v BASH_REMOTE_SCRIPT_EXEC "${BASH_REMOTE_SCRIPT_FRMT}" "${BASH_REMOTE_SCRIPT_PASS}" "${BASH_REMOTE_SCRIPT_PGDB}"
printf -v BASH_EXPORT_SCRIPT_DUMP '%s.sql' "${BASH_REMOTE_SCRIPT_PGDB}"
bash -c "${BASH_REMOTE_SCRIPT_EXEC}" > ${BASH_EXPORT_SCRIPT_DUMP}
# strip everything between the $STDOUT$ marker pairs, then the markers themselves
sed '/^\/\* \$STDOUT\$$/,/^\$STDOUT\$ \*\/$/{/^\/\* \$STDOUT\$$/!{/^\$STDOUT\$ \*\/$/!d}}' -i ${BASH_EXPORT_SCRIPT_DUMP}
sed -z 's|\n/\* $STDOUT$\n$STDOUT$ \*/||g' -i ${BASH_EXPORT_SCRIPT_DUMP}
sed -z 's|/\* $STDOUT$\n$STDOUT$ \*/\n||g' -i ${BASH_EXPORT_SCRIPT_DUMP}
gzip ${BASH_EXPORT_SCRIPT_DUMP}
Export CSV
psql -U academia academia_audit_staging
-- client-side export of one day's audit rows
-- NOTE(review): '~' is likely not expanded by \COPY — prefer an absolute path
\COPY (SELECT * FROM audit_log WHERE action_type = 'Sync Collection' AND DATE_TRUNC('day', logged_on) = '20241010')\
To '~/.config/audit_log_20241010_1010_MYT.csv' With CSV DELIMITER ',' HEADER;
Knowledge
wsl (windows subsystem for linux):
 sudo service redis-server restart
 sudo service postgresql restart
 sudo service apache2 restart
comment and uncomment sql:
 sed -i -e 's|^|-- |g' src/main/resources/db/migration/*.sql
 sed -i -e 's|^-- ||g' src/main/resources/db/migration/*.sql
-- psql: bootstrap a database with a same-named owner role
CREATE DATABASE chorke_init_pro;
CREATE USER chorke_init_pro WITH ENCRYPTED PASSWORD 'pa55w0rd';
GRANT ALL PRIVILEGES ON DATABASE chorke_init_pro TO chorke_init_pro;
-- \q
-- revoke public connect from specific database after creation
REVOKE CONNECT ON DATABASE academia_ebis_dev FROM PUBLIC;
-- revoke public connect for all future databases
-- (CREATE DATABASE clones template1, so its ACL carries over)
REVOKE CONNECT ON DATABASE template1 FROM PUBLIC;
-- list 'uk_'-prefixed constraints on public.fiscal_year
SELECT
    con.conname,
    nsp.nspname,
    rel.relname
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = con.connamespace
WHERE
    nsp.nspname = 'public' AND
    rel.relname = 'fiscal_year' AND
    con.conname LIKE 'uk\_%'; -- fixed: bare '_' is a single-char wildcard in LIKE
-- \ds-style catalog query: list visible sequences (relkind 'S') outside
-- the system schemas, with owner; the CASE covers every relkind so the
-- query is easy to repurpose for other object types
SELECT
n.nspname AS "schema",
c.relname AS "name",
CASE c.relkind
WHEN 'v' THEN 'view'
WHEN 'i' THEN 'index'
WHEN 'r' THEN 'table'
WHEN 's' THEN 'special'
WHEN 'S' THEN 'sequence'
WHEN 'f' THEN 'foreign table'
WHEN 'p' THEN 'partitioned table'
WHEN 'I' THEN 'partitioned index'
WHEN 'm' THEN 'materialized view'
END AS "type",
pg_catalog.pg_get_userbyid(c.relowner) AS "owner"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('S','')
AND n.nspname !~ '^pg_toast'
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND pg_catalog.pg_table_is_visible(c.oid)
ORDER BY 1,2;
-- \l+-style overview of every database: owner, encoding, collation, ACL,
-- on-disk size (only when this role may connect), tablespace, comment
SELECT d.datname AS "db_name",
pg_catalog.pg_get_userbyid(d.datdba) AS "db_owner",
pg_catalog.pg_encoding_to_char(d.encoding) AS "db_encoding",
d.datcollate AS "db_collation",
d.datctype AS "db_collation_type",
pg_catalog.array_to_string(d.datacl, e'\n') AS "db_access_privileges",
CASE
WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN
pg_catalog.pg_size_pretty(pg_catalog.pg_database_size(d.datname))
ELSE 'No Access'
END AS "db_size",
t.spcname AS "db_tablespace",
pg_catalog.shobj_description(d.oid, 'pg_database') AS "db_description"
FROM pg_catalog.pg_database d
JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid
ORDER BY 1;
-- list database users; the pg_user view masks the password column
SELECT
usesysid AS "user_id",
usename AS "user_name",
passwd AS "user_password",
usesuper AS "is_super_user"
FROM pg_catalog.pg_user ORDER BY 1;
-- Build and print two libpq connection strings: one from hard-coded parts,
-- one from this session's own server address/port/database/user.
DO
$BODY$
DECLARE
p_password varchar :='p@$$w0rd';
p_connection_string varchar :='hostaddr=127.0.0.1 port=5432 dbname=postgres user=postgres password=';
BEGIN
p_connection_string := p_connection_string || p_password;
RAISE NOTICE 'Connection String: %', p_connection_string;
-- rebuild from the current session's server-side introspection functions
SELECT INTO p_connection_string
'hostaddr=' || host(inet_server_addr()) || ' port=' || inet_server_port() ||
' dbname=' || current_database() || ' user=' || CURRENT_USER || ' password=' || p_password;
RAISE NOTICE 'Connection String: %', p_connection_string;
END
$BODY$;
# provision the ebis staging database and owner via a here-doc piped to psql
cat << EOF | psql -U postgres
DROP DATABASE IF EXISTS academia_ebis_staging;
CREATE DATABASE academia_ebis_staging;
CREATE USER academia WITH ENCRYPTED PASSWORD 'sadaqah!';
GRANT ALL PRIVILEGES ON DATABASE academia_ebis_staging TO academia;
ALTER DATABASE academia_ebis_staging OWNER TO academia;
-- ALTER USER academia WITH SUPERUSER;
EOF
Allow Remote
apt install -y iputils-ping telnet dnsutils
sudo systemctl status ufw
sudo ufw status verbose
sudo ufw status numbered
sudo ufw allow 5432/tcp
sudo ufw enable
| |
nano /etc/postgresql/14/main/postgresql.conf |
nano /etc/postgresql/14/main/pg_hba.conf |
| |
listen_addresses = '*' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
port = 5432 # (change requires restart)
|
# IPv4 local connections:
host all all 127.0.0.1/32 scram-sha-256
host all all 0.0.0.0/0 scram-sha-256
# host all all 10.19.83.1/24 scram-sha-256
# host all all 10.20.13.1/24 scram-sha-256
|
Spread Sheet
=CONCAT("(LOCALTIMESTAMP, 'religion', 'chorke.org', (SELECT (COALESCE(MAX(id), 0) + 1) FROM grouped_collection), '", SUBSTITUTE(A2 , """" , ""), "', '", SUBSTITUTE(A2 , """" , ""), "', '{""state"": """ , SUBSTITUTE(B2 , "'" , "''") , """, ""city"": """ , SUBSTITUTE(C2 , "'" , "''") , """}'),")
=CONCAT("[""", SUBSTITUTE(A2 , """", ""), """, """, SUBSTITUTE(A2, """", ""), "",""", {""state"": """ , SUBSTITUTE(B2 , "'" , "''") , """, ""city"": """ , SUBSTITUTE(C2 , "'" , "''") , """}", "],")
=CONCAT("[""", SUBSTITUTE(C518 , """", ""), """, """, SUBSTITUTE(B518, """", ""), "",""", {}, " , A518 , ", ", IF(ISBLANK(D518), "false", "true"), "],")
=CONCAT("[""", SUBSTITUTE(C5 , """", ""), """, """, SUBSTITUTE(B5, """", ""), "",""", {""class"": """, D5 ,""", ""income"": true}, " , A5, "],")
=CONCAT("[""", SUBSTITUTE(C27 , """", ""), """, """, SUBSTITUTE(B27, """", ""), "",""", {}, " , A27, "],")
=CONCAT("[""", C27, """, """, B27, "",""", {}, " , A27, "],")
Docker PSQL
docker run -d --name postgres \
  -e POSTGRES_PASSWORD=sadaqah \
  -v ./init.sql:/docker-entrypoint-initdb.d/init.sql \
  -p 127.0.0.1:5433:5432 \
  postgres:12.14
docker exec -it postgres bash
psql -U postgres
docker stop postgres
docker rm postgres
docker exec -it postgres \
  psql -U postgres
docker ps -a
docker rm postgres -f