NCcloud error: An I/O error occurred while sending to the backend.
PostgreSQL server error log:
2019-07-17 10:51:14.450 +08:192.168.0.197(35564):yc_pg@yc_pg:[15437]:HINT: In a moment you should be able to reconnect to the database and repeat your command.
2019-07-17 10:51:14.456 +08:192.168.0.197(35352):yc_pg@yc_pg:[15321]:WARNING: terminating connection because of crash of another server process
2019-07-17 10:51:14.456 +08:192.168.0.197(35352):yc_pg@yc_pg:[15321]:DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory.
select 'select pg_cancel_backend('|| a0.pid || ');' as cancel_pid,
'select pg_terminate_backend('|| a0.pid || ');' as terminate_pid,
xact_start,round(extract(epoch FROM (now()-xact_start))::numeric,1) as xact_second,
query_start,round(extract(epoch FROM (now()-query_start))::numeric,1) as query_second,
datname, pid, usename, application_name, client_addr, client_port,
xact_start, query_start,
state_change,wait_event_type,wait_event, state, backend_xid, backend_xmin, query
from pg_stat_activity a0
where 1=1
and a0.state<>'idle'
and (a0.backend_xid is not null or a0.backend_xmin is not null)
order by now()-xact_start;
SELECT d.datname AS name,
       pg_catalog.pg_get_userbyid(d.datdba) AS owner,
       CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT')
            THEN pg_catalog.pg_size_pretty(pg_catalog.pg_database_size(d.datname))
            ELSE 'No Access'
       END AS size
FROM pg_catalog.pg_database d
ORDER BY CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT')
              THEN pg_catalog.pg_database_size(d.datname)
              ELSE NULL
         END DESC -- nulls first
LIMIT 20;
select schemaname,relname,indexrelname,pg_size_pretty( pg_relation_size(relid)) from pg_stat_user_indexes order by pg_relation_size(relid) desc limit 10 ;
with
t_wait as
(
select a.mode,a.locktype,a.database,a.relation,a.page,a.tuple,a.classid,a.granted,
a.objid,a.objsubid,a.pid,a.virtualtransaction,a.virtualxid,a.transactionid,a.fastpath,
b.state,b.query,b.xact_start,b.query_start,b.usename,b.datname,b.client_addr,b.client_port,b.application_name
from pg_locks a,pg_stat_activity b where a.pid=b.pid and not a.granted
),
t_run as
(
select a.mode,a.locktype,a.database,a.relation,a.page,a.tuple,a.classid,a.granted,
a.objid,a.objsubid,a.pid,a.virtualtransaction,a.virtualxid,a.transactionid,a.fastpath,
b.state,b.query,b.xact_start,b.query_start,b.usename,b.datname,b.client_addr,b.client_port,b.application_name
from pg_locks a,pg_stat_activity b where a.pid=b.pid and a.granted
),
t_overlap as
(
select r.* from t_wait w join t_run r on
(
r.locktype is not distinct from w.locktype and
r.database is not distinct from w.database and
r.relation is not distinct from w.relation and
r.page is not distinct from w.page and
r.tuple is not distinct from w.tuple and
r.virtualxid is not distinct from w.virtualxid and
r.transactionid is not distinct from w.transactionid and
r.classid is not distinct from w.classid and
r.objid is not distinct from w.objid and
r.objsubid is not distinct from w.objsubid and
r.pid <> w.pid
)
),
t_unionall as
(
select r.* from t_overlap r
union all
select w.* from t_wait w
)
select locktype,datname,relation::regclass,page,tuple,virtualxid,transactionid::text,classid::regclass,objid,objsubid,
string_agg(
'Pid: '||case when pid is null then 'NULL' else pid::text end||chr(10)||
'Lock_Granted: '||case when granted is null then 'NULL' else granted::text end||' , Mode: '||case when mode is null then 'NULL' else mode::text end||' , FastPath: '||case when fastpath is null then 'NULL' else fastpath::text end||' , VirtualTransaction: '||case when virtualtransaction is null then 'NULL' else virtualtransaction::text end||' , Session_State: '||case when state is null then 'NULL' else state::text end||chr(10)||
'Username: '||case when usename is null then 'NULL' else usename::text end||' , Database: '||case when datname is null then 'NULL' else datname::text end||' , Client_Addr: '||case when client_addr is null then 'NULL' else client_addr::text end||' , Client_Port: '||case when client_port is null then 'NULL' else client_port::text end||' , Application_Name: '||case when application_name is null then 'NULL' else application_name::text end||chr(10)||
'Xact_Start: '||case when xact_start is null then 'NULL' else xact_start::text end||' , Query_Start: '||case when query_start is null then 'NULL' else query_start::text end||' , Xact_Elapse: '||case when (now()-xact_start) is null then 'NULL' else (now()-xact_start)::text end||' , Query_Elapse: '||case when (now()-query_start) is null then 'NULL' else (now()-query_start)::text end||chr(10)||
'SQL (Current SQL in Transaction): '||chr(10)||
case when query is null then 'NULL' else query::text end,
chr(10)||'--------'||chr(10)
order by
( case mode
when 'INVALID' then 0
when 'AccessShareLock' then 1
when 'RowShareLock' then 2
when 'RowExclusiveLock' then 3
when 'ShareUpdateExclusiveLock' then 4
when 'ShareLock' then 5
when 'ShareRowExclusiveLock' then 6
when 'ExclusiveLock' then 7
when 'AccessExclusiveLock' then 8
else 0
end ) desc,
(case when granted then 0 else 1 end)
) as lock_conflict
from t_unionall
group by
locktype,datname,relation,page,tuple,virtualxid,transactionid::text,classid,objid,objsubid ;
30 replies
nccloud
1. SQL execution plan
The actual execution plan after the SQL has been run:
explain analyze (select * from ……)
2. Query index information from the system catalogs
select A.SCHEMANAME,
A.TABLENAME,
A.INDEXNAME,
A.TABLESPACE,
A.INDEXDEF,
B.AMNAME,
C.INDEXRELID,
C.INDNATTS,
C.INDISUNIQUE,
C.INDISPRIMARY,
C.INDISCLUSTERED,
D.DESCRIPTION
from PG_AM B
left join PG_CLASS F
on B.OID = F.RELAM
left join PG_STAT_ALL_INDEXES E
on F.OID = E.INDEXRELID
left join PG_INDEX C
on E.INDEXRELID = C.INDEXRELID
left outer join PG_DESCRIPTION D
on C.INDEXRELID = D.OBJOID, PG_INDEXES A
where A.SCHEMANAME = E.SCHEMANAME
and A.TABLENAME = E.RELNAME
and A.INDEXNAME = E.INDEXRELNAME
and E.RELNAME = 'bd_customer';
3. Query table structure from the system catalogs
select col.table_schema,
col.table_name,
col.ordinal_position,
col.column_name,
col.data_type,
col.character_maximum_length,
col.numeric_precision,
col.numeric_scale,
col.is_nullable,
col.column_default,
des.description,
table_catalog
from information_schema.columns col
left join pg_description des
on col.table_name ::regclass = des.objoid
and col.ordinal_position = des.objsubid
where table_schema = 'huaweiyun_uapbig'
and table_name = 'bd_customer'
order by ordinal_position;
4. Query column information for the current user
select col.table_schema,col.table_name,col.column_name
from information_schema.columns col
where table_schema = USER -- current user only
and table_name = 'bd_customer'
5. Change a column's length
ALTER TABLE pub_vochange_b alter COLUMN RULEDATA type character varying(1024);
nccloud
2. Statistics
2.1 Check when a table's statistics were last updated automatically / manually (ANALYZE)
select schemaname, relname, last_autoanalyze as last_auto_analyze, last_analyze as last_manual_analyze from pg_stat_user_tables where relname='bd_customer';
2.2 View the statistics for a specific table
select * from pg_stats where tablename='test' and attname='pos'; -- attname is the column name
2.3 What VACUUM does
1. Reclaims and makes reusable the disk space occupied by updated/deleted rows.
The reason: PostgreSQL's INSERT, UPDATE, and DELETE operations do not physically remove old row versions. If the space is not reclaimed periodically, dead data accumulates and query performance drops dramatically.
2. Updates the statistics used by the PostgreSQL query planner.
The reason: when planning a query, PostgreSQL chooses an execution plan based on these statistics; if they are not kept up to date, query performance may be worse than expected.
3. Prevents very old data from being lost due to transaction ID wraparound.
The reason: every transaction in PostgreSQL gets a transaction ID, and this counter has an upper limit. When it reaches the maximum it wraps around and starts again from the minimum, so if old rows are not cleaned up (frozen) in time, the old data can effectively be lost along with its transaction ID.
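For the wraparound point above, a quick health check (a sketch, not part of the original note) is to look at each database's transaction ID age; autovacuum starts aggressive freezing as it approaches autovacuum_freeze_max_age (200 million by default):
-- transaction ID age per database
select datname, age(datfrozenxid) as xid_age
from pg_database
order by age(datfrozenxid) desc;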
2.4 Commands for collecting statistics
vacuum full analyze;
This does a complete collection and rebuilds the tables, but it locks the tables and takes a very long time.
vacuum analyze;
Does not lock the tables.
vacuum analyze table_name;
Collects statistics for the specified table only.
3. Performance tuning example
3.1 A UNION sort runs out of work_mem and spills to disk
SQL: select bd_customer.pk_customer as pk_customer,
bd_customer.ts as ts,
bd_customer.dr as dr,
bd_customer.delperson as delperson,
bd_customer.pk_billtypecode as pk_billtypecode,
bd_customer.isvat as isvat,
bd_customer.deletestate as deletestate,
bd_customer.legalbody as legalbody,
bd_customer.tel1 as tel1,
bd_customer.tel2 as tel2,
bd_customer.frozenflag as frozenflag,
bd_customer.pk_currtype as pk_currtype,
bd_customer.vatcode as vatcode,
bd_customer.def30 as def30,
bd_customer.deltime as deltime,
bd_customer.tel3 as tel3,
bd_customer.pk_customerpf as pk_customerpf,
bd_customer.def7 as def7,
bd_customer.def8 as def8,
bd_customer.issupplier as issupplier,
bd_customer.def5 as def5,
bd_customer.def6 as def6,
bd_customer.taxpayerid as taxpayerid,
bd_customer.pk_custtaxes as pk_custtaxes,
bd_customer.def9 as def9,
bd_customer.code as code,
bd_customer.isfreecust as isfreecust,
bd_customer.registerfund as registerfund,
bd_customer.url as url,
bd_customer.modifier as modifier,
bd_customer.def28 as def28,
bd_customer.pk_areacl as pk_areacl,
bd_customer.ecotypesincevfive as ecotypesincevfive,
bd_customer.def27 as def27,
bd_customer.def26 as def26,
bd_customer.def25 as def25,
bd_customer.email as email,
bd_customer.def29 as def29,
bd_customer.def20 as def20,
bd_customer.modifiedtime as modifiedtime,
bd_customer.pk_custclass as pk_custclass,
bd_customer.enablestate as enablestate,
bd_customer.pk_group as pk_group,
bd_customer.def24 as def24,
bd_customer.def4 as def4,
bd_customer.def23 as def23,
bd_customer.def3 as def3,
bd_customer.def22 as def22,
bd_customer.def2 as def2,
bd_customer.def21 as def21,
bd_customer.def1 as def1,
bd_customer.def12 as def12,
bd_customer.def13 as def13,
bd_customer.custprop as custprop,
bd_customer.def10 as def10,
bd_customer.def11 as def11,
bd_customer.memo as memo,
bd_customer.trade as trade,
bd_customer.def18 as def18,
bd_customer.def19 as def19,
bd_customer.def16 as def16,
bd_customer.def17 as def17,
bd_customer.dataoriginflag as dataoriginflag,
bd_customer.def14 as def14,
bd_customer.creator as creator,
bd_customer.def15 as def15,
bd_customer.pk_org as pk_org,
bd_customer.name as name,
bd_customer.shortname as shortname,
bd_customer.fax2 as fax2,
bd_customer.fax1 as fax1,
bd_customer.corpaddress as corpaddress,
bd_customer.pk_format as pk_format,
bd_customer.pk_customer_main as pk_customer_main,
bd_customer.pk_country as pk_country,
bd_customer.creationtime as creationtime,
bd_customer.pk_supplier as pk_supplier,
bd_customer.mnecode as mnecode,
bd_customer.name5 as name5,
bd_customer.name6 as name6,
bd_customer.name3 as name3,
bd_customer.name4 as name4,
bd_customer.isretailstore as isretailstore,
bd_customer.ename as ename,
bd_customer.pk_financeorg as pk_financeorg,
bd_customer.custstate as custstate,
bd_customer.name2 as name2,
bd_customer.pk_timezone as pk_timezone
from bd_customer bd_customer
where PK_CUSTOMER in ('100111100000000057ZC')
and (pk_customer in
(select pk_customer
from bd_custorg
where pk_org in ('0001111000000000093Y')
and enablestate = 2
union
select pk_customer
from bd_customer
where (pk_org = '000111100000000001N5' or
pk_org = '0001111000000000093Y')))
The execution plan is as follows:
explain analyze (<the SQL to analyze>);
In the execution plan above (the part highlighted in red in the original screenshot), the sort is performed on disk (external sort). The cause is that work_mem defaults to 4MB, which is not enough for this sort.
Temporary setting: set work_mem = '32MB';
Check the current value: show work_mem;
After changing work_mem the plan no longer spills to disk, and the SQL execution time drops from the original 3.7s to 512ms.
nccloud
3.2 Enabling HINT (pg_hint_plan)
Problem description:
During stress testing, the PG database's memory usage grew to 100%; the database forcibly reclaimed memory and released all backend processes, causing application errors (it was later found that HINT was not enabled on the database, which degraded query performance and consumed large amounts of memory).
Symptoms:
NCcloud error: An I/O error occurred while sending to the backend.
PostgreSQL server error log:
2019-07-17 10:51:14.450 +08:192.168.0.197(35564):yc_pg@yc_pg:[15437]:HINT: In a moment you should be able to reconnect to the database and repeat your command.
2019-07-17 10:51:14.456 +08:192.168.0.197(35352):yc_pg@yc_pg:[15321]:WARNING: terminating connection because of crash of another server process
2019-07-17 10:51:14.456 +08:192.168.0.197(35352):yc_pg@yc_pg:[15321]:DETAIL: The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory.
Solution:
Connect to the corresponding database as root and enable HINT with the following command:
select control_extension('create','pg_hint_plan');
nccloud
4.1 Query the sessions currently connected to the database
select * from pg_stat_activity where state ='active' order by query_start;
select oid from pg_class where relname = 'sm_user';
select * from pg_stat_activity order by query_start;
select * from pg_locks where relation = (select oid from pg_class where relname = 'sm_user');
Kill the sessions found:
select pg_terminate_backend(pid) from pg_stat_activity where usename='nccloud_202105s3' and client_addr not in('10.168.1.251');
select pg_terminate_backend(13126);
select pg_postmaster_start_time();
4.2 Kill a running SQL statement
Find the pid of the running SQL:
select pid,query from pg_stat_activity where query like '%elect pk_bankdoc from bd_bankdoc t whe%';
Kill the backend process using the pid found:
select pg_terminate_backend(pid);
4.3 Maximum number of autovacuum worker processes that may run concurrently
autovacuum_max_workers: a value of 20 is recommended
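A minimal sketch of applying the recommendation (requires superuser; note that autovacuum_max_workers only takes effect after a server restart, not a reload):
-- raise the autovacuum worker limit (takes effect after restart)
alter system set autovacuum_max_workers = 20;
-- verify after the restart
show autovacuum_max_workers;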
4.4 Relationship between work_mem, the number of connections, and memory usage
5. PostgreSQL syntax notes
5.1 Column types
1. NUMBER in DDL is automatically converted to numeric
ALTER TABLE FP_EINVOICESTATUS ADD jshj NUMBER(15,2); executes successfully, but PostgreSQL automatically converts the column to the numeric type.
2. The TS column type:
ts char(19) default to_char(sysdate,'yyyy-mm-dd hh24:mi:ss')   -- Oracle
ts char(19) default to_char(now(),'yyyy-mm-dd hh24:mi:ss')     -- PostgreSQL
5.2 Adding and dropping columns in PostgreSQL
Add a column:
alter table FP_EINVOICESTATUS add column jshj decimal(15,2);
Drop a column:
alter table FP_EINVOICESTATUS drop column jshj;
5.3 Adding a column: Oracle vs. PG syntax
Oracle syntax:
alter table bd_balatype add (beiran_code VARCHAR2(40));
PG syntax:
alter table bd_balatype add beiran_code VARCHAR2(40);
Note: in PG the added column definition must not be wrapped in parentheses.
5.4 Column types when creating tables
In CREATE TABLE, when a column's type is int, a size may not be specified; simply remove the (50).
5.5 Substring function
substr(strings, 0, 2): PostgreSQL treats the first character as position 1, so writing substr(strings, 0, 2) returns only a single character.
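A quick illustration of the behaviour described above:
-- position 0 does not exist, so only one of the two requested characters is returned
select substr('abcdef', 0, 2);   -- 'a'
-- start at position 1 to get the first two characters
select substr('abcdef', 1, 2);   -- 'ab'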
nccloud
1. Notes
Sort memory parameter: work_mem
256G --256M
2. Migrating from Oracle to PG
Migration notes:
For every Oracle database user, a Database with the same name must first be created in the PG database.
Connect to the postgres database as root and create the tablespaces and database:
CREATE TABLESPACE nnc_index01 LOCATION '/data/nnc_index01';
CREATE TABLESPACE nnc_data01 LOCATION '/data/nnc_data01';
CREATE DATABASE nccdb TABLESPACE nnc_data01;
Switch to the nccdb database, logged in as root:
create user nccloud with createdb createrole password 'NCcloud@123';
GRANT all ON TABLESPACE nnc_data01 TO nccloud;
GRANT all ON TABLESPACE nnc_index01 TO nccloud;
GRANT ALL ON DATABASE nccdb TO nccloud;
During the migration, use the newly created user as the target database user.
3. Schema properties
1) Create a schema
# create schema schema01;
2) Create objects in the schema
#create table schema01.t1(id int);
3) Drop a schema
db01=# drop schema schema01;
ERROR: cannot drop schema schema01 because other objects depend on it
DETAIL: table schema01.t1 depends on schema schema01
HINT: Use DROP ... CASCADE to drop the dependent objects too.
db01=# drop schema schema01 cascade;
NOTICE: drop cascades to table schema01.t1
If a schema still contains objects that should be dropped along with it, the CASCADE keyword is required. It is a bit like removing a directory with rmdir: a directory that still has files in it cannot be removed.
4) Create a schema with a specified owner
By default the owner of a schema is whoever created it, but an owner can also be specified explicitly:
db01=# create schema s01 authorization hippo;
CREATE SCHEMA
db01=# create schema authorization hippo;
CREATE SCHEMA
db01=# \dn
List of schemas
Name | Owner
--------+----------
hippo | hippo
public | postgres
s01 | hippo
(3 rows)
If an owner is specified but no schema name is given, the schema name defaults to the owner's name.
4. Query database owners and character sets (encodings)
SELECT d.datname as "Name",
pg_catalog.pg_get_userbyid(d.datdba) as "Owner",
pg_catalog.pg_encoding_to_char(d.encoding) as "Encoding",
d.datcollate as "Collate",
d.datctype as "Ctype",
pg_catalog.array_to_string(d.datacl, E'\n') AS "Access privileges"
FROM pg_catalog.pg_database d
ORDER BY 1;
5. Clone (copy) a database
create database test1 template='test';
test is the production database
test1 is the test database to be created
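One caveat worth adding (not stated in the original reply): CREATE DATABASE ... TEMPLATE fails if any other session is connected to the source database, so it may be necessary to clear its sessions first. A sketch:
-- terminate other sessions connected to the template database 'test'
select pg_terminate_backend(pid)
from pg_stat_activity
where datname = 'test'
  and pid <> pg_backend_pid();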
nccloud
Troubleshooting a lock problem
A large number of LWLock waits
Frequent buffer replacement was causing lock waits; possible causes include heavy table scans, shared_buffers set too small, saturated I/O, or poor disks.
(In this case it was actually caused by a debugging parameter that had been enabled.)
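A minimal sketch for spotting such waits (PostgreSQL 10+, where pg_stat_activity reports the wait_event_type value 'LWLock'):
-- sessions currently waiting on lightweight locks
select pid, wait_event_type, wait_event, state, query
from pg_stat_activity
where wait_event_type = 'LWLock';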
nccloud
1. Create a user named test with the password ******
CREATE USER test WITH ENCRYPTED PASSWORD '******';
2. Set the user's default transactions to read-only
alter user test set default_transaction_read_only=on;
3. Grant USAGE on the public schema of every database to test
GRANT USAGE ON SCHEMA public to test;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO test;
4. Grant the user permission to connect to a database (run this while connected to the specific database whose access you are granting)
GRANT CONNECT ON DATABASE mytest to test;
5. Switch to the target database
\c mytest
6. Grant the user read access to tables and sequences:
GRANT USAGE ON SCHEMA public to test;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO test;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO test;
7. Grant the user read access to a specific view
GRANT SELECT ON XXXX TO test;
nccloud
View the size of each database in the current instance:
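The query itself did not make it into this reply (probably a screenshot in the original thread); a sketch matching the per-database size query used later in the thread:
select datname, pg_size_pretty(pg_database_size(datname)) as size
from pg_database
order by pg_database_size(datname) desc;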
nccloud
Check the PG database version
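The command itself is missing from the reply; the standard ways are:
select version();        -- full version string
show server_version;     -- version number only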
nccloud
Disk usage per database:
Disk usage per table within a database:
Disk usage per index in the testdb database:
(the queries themselves are missing from this reply; sketches follow below)
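Sketches for the three items above (for the table and index queries, connect to the target database, e.g. testdb, first):
-- per database
select datname, pg_size_pretty(pg_database_size(datname)) as size
from pg_database
order by pg_database_size(datname) desc;
-- per table in the current database
select schemaname, relname, pg_size_pretty(pg_total_relation_size(relid)) as total_size
from pg_stat_user_tables
order by pg_total_relation_size(relid) desc
limit 20;
-- per index in the current database
select schemaname, relname, indexrelname, pg_size_pretty(pg_relation_size(indexrelid)) as index_size
from pg_stat_user_indexes
order by pg_relation_size(indexrelid) desc
limit 20;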
nccloud
PostgreSQL: query a table's column information (column names, types, comments, etc.)
PostgreSQL: query the column information of the sys_user table (name, type, comment, nullability)
SELECT
col_description ( A.attrelid, A.attnum ) AS COMMENT,
format_type ( A.atttypid, A.atttypmod ) AS TYPE,
A.attname AS NAME,
A.attnotnull AS NOTNULL
FROM
pg_class AS C,
pg_attribute AS A
WHERE
C.relname = 'sys_user'
AND A.attrelid = C.oid
AND A.attnum > 0
Query all table names and their comments
SELECT
relname AS tabname,
CAST ( obj_description ( C.oid, 'pg_class' ) AS VARCHAR ) AS COMMENT
FROM
pg_class C
WHERE
relkind = 'r'
AND relname NOT LIKE'pg_%'
AND relname NOT LIKE'sql_%'
ORDER BY
relname
nccloud
PG: concatenating query results to generate SQL statements
select concat_ws(' ','ALTER TABLE ',tablename,'SET SCHEMA XXXXX;') from pg_tables where schemaname ='public'
select * from PG_STAT_ACTIVITY where datname='yonbip_220709' and client_addr in('123.103.9.7','123.103.9.X','123.103.9.X','125.35.5.253','125.35.5.25X');
-- note: the pid value is used as the concat_ws separator, producing 'select pg_terminate_backend(<pid>);'
select concat_ws(pid,'select pg_terminate_backend(',');') from PG_STAT_ACTIVITY where datname='yonbip_220709' and client_addr in('123.103.9.7','123.103.9.X','123.103.9.X','125.35.5.253','125.35.5.25X');
nccloud
Query the size of a specific database: select pg_size_pretty(pg_database_size(pg_database.datname)) AS database_size from pg_database where datname = '<database name>';
-- size of a single database
postgres=# select pg_size_pretty(pg_database_size('postgres'));
pg_size_pretty
----------------
8275 kB
(1 row)
-- size of every database
postgres=# select datname, pg_size_pretty (pg_database_size(datname)) AS size from pg_database;
nccloud
List tablespaces:
postgres=> SELECT spcname FROM pg_tablespace;
or:
postgres=> \db
nccloud
Query currently active users and sessions
select * from pg_stat_activity;
nccloud
View the indexes on a given table
select * from pg_indexes where tablename = '<table name>';
select * from pg_statio_all_indexes where relname = '<table name>';
nccloud
PG: configure statement logging (requires superuser privileges)
alter system set log_statement= 'all';
SELECT pg_reload_conf();
show log_statement;
show log_min_duration_statement;
# For an on-premises deployment, the log location is controlled by the log_directory setting in /var/lib/pgsql/data/postgresql.conf
The log_statement parameter controls which SQL statements are logged. Valid values are none (off), ddl, mod, and all (all statements).
ddl logs all data definition statements, such as CREATE, ALTER, and DROP.
mod logs all ddl statements plus data-modifying statements such as INSERT, UPDATE, DELETE, TRUNCATE, and COPY FROM.
PREPARE, EXECUTE, and EXPLAIN ANALYZE statements are also logged if their contained command is of an appropriate type. For clients using the extended query protocol, logging occurs when an Execute message is received, and the values of bind parameters are included (with any embedded single-quote marks doubled).
The default is none. Only superusers can change this setting.
Note: even with log_statement = all, statements containing simple syntax errors are not logged, because the log message is emitted only after basic parsing has determined the statement type. In the case of the extended query protocol, this setting likewise does not log statements that fail before the execute phase (i.e. during parse analysis or planning). Set log_min_error_statement to ERROR (or lower) to log such statements.
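Related settings that often go together with the above (a sketch, not part of the original reply; requires superuser):
-- also log any statement that runs longer than 1 second, and statements that fail with an error
alter system set log_min_duration_statement = '1s';
alter system set log_min_error_statement = 'error';
select pg_reload_conf();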
nccloud
Change a table's owner
alter table XXX owner to XXX;
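To change the owner of every table in a schema at once, the same generate-SQL trick used earlier in the thread works; a sketch (the schema and owner names are illustrative):
-- generate ALTER TABLE ... OWNER TO statements for all tables in a schema
select format('ALTER TABLE %I.%I OWNER TO new_owner;', schemaname, tablename)
from pg_tables
where schemaname = 'public';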
nccloud
View locks
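The query was not included in this reply; a simple sketch against pg_locks:
-- current locks, ungranted (waiting) ones first
select locktype, relation::regclass, mode, granted, pid
from pg_locks
order by granted, pid;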
nccloud
View locks and the associated SQL
select T.PID, T.STATE, T.QUERY, T.WAIT_EVENT_TYPE, T.WAIT_EVENT,
T.QUERY_START
from PG_STAT_ACTIVITY T
where
T.WAIT_EVENT_TYPE = 'Lock';
nccloud
-- Cancel the corresponding activity by pid
select PG_CANCEL_BACKEND('6984');
nccloud
1. Check the size of a specific database
select pg_size_pretty(pg_database_size('postgres'));
2. Query the size of every database in the instance
select datname, pg_size_pretty (pg_database_size(datname)) AS size from pg_database;
3. Query the total size of all databases in the instance
select pg_size_pretty (sum(pg_database_size(datname))) AS size from pg_database;
nccloud
Granting privileges across schemas
--create a user
create user user1;
--change the password
alter user user1 with password 'password';
--grant usage on the schemas
grant usage on schema schema1 to user1;
grant usage on schema schema2 to user1;
Changing search_path allows operating across schemas:
set search_path = "$user",user1,user2
--grant ALL on schema schema1 to user1 (a very broad privilege; use with caution)
grant all on schema schema1 to user1;
--grant ALL on all tables in schema schema1 to user1 (a very broad privilege; use with caution)
grant all on all tables in schema schema1 to user1;
--grant SELECT on a single table in another schema
grant select on schema2.table1 to user1;
--revoke all grants
revoke all on all tables in schema schema1 from user1;
--set the search_path for a specific user
alter user user1 set search_path="$user",user1,user2;
nccloud
Query collations
To find columns that use a non-default collation, the following query can be used:
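The query itself is missing from this reply; a sketch via information_schema (collation_name is NULL when the column uses the default collation):
select table_schema, table_name, column_name, collation_name
from information_schema.columns
where collation_name is not null;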
Edit: to find a database's own collation, you need to query pg_database:
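Again the query is missing; a sketch:
select datname, datcollate, datctype
from pg_database;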
Here is a link to the relevant section of the manual
nccloud
On KingbaseES (Renda Jincang, PostgreSQL-based), adjusting the parameter client_min_messages = 'ERROR' is recommended.
PostgreSQL provides the client_min_messages parameter to control the level of messages returned to the client; different levels return messages of different sizes, and the default is NOTICE. Normally, if the client does not issue SQL very frequently, the default is fine; but when a client executes a large volume of SQL, raising this level becomes worthwhile: it reduces the size of the messages, lowering network load and improving throughput.
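A sketch of applying it, either per session or persisted for an application role (the role name app_user is illustrative):
-- current session only
set client_min_messages = error;
-- persist for a specific role
alter role app_user set client_min_messages = error;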
nccloud
In PostgreSQL, counting the rows of a large table is slow: because of MVCC, getting an exact number requires a full count of the rows. There is a way to speed this up dramatically if the count does not have to be exact, which seems to be the case here.
Instead of getting the exact count (slow on large tables):
SELECT count(*) AS exact_count FROM myschema.mytable;
you can get an estimate like this (extremely fast):
SELECT reltuples::bigint AS estimate FROM pg_class where relname='mytable';
How close the estimate is depends on whether ANALYZE runs often enough; it is usually very close. See the PostgreSQL Wiki FAQ, or the dedicated wiki page on count(*) performance.
Better: the PostgreSQL wiki version is a bit sloppy, as it ignores the possibility of multiple tables with the same name in different schemas. To account for that:
SELECT c.reltuples::bigint AS estimate
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = 'mytable' AND n.nspname = 'myschema';
Better still:
SELECT reltuples::bigint AS estimate
FROM pg_class
WHERE oid = 'myschema.mytable'::regclass;
Faster, simpler, safer, more elegant. See the manual chapter on object identifier types. Using to_regclass('myschema.mytable') in Postgres 9.4+ avoids an exception for invalid table names (see: how to check whether a table exists in a given schema).
TABLESAMPLE SYSTEM (n) in Postgres 9.5+:
SELECT 100 * count(*) AS estimate FROM mytable TABLESAMPLE SYSTEM (1);
As @a_horse commented, this newly added clause for SELECT can be useful if the statistics in pg_class are not current enough for some reason, for example: no autovacuum running; after a large INSERT or DELETE; TEMPORARY tables (which are not covered by autovacuum). It only looks at a random n% (1 in the example) of the blocks and counts the rows in them. A larger sample increases the cost and reduces the error. Accuracy depends on more factors: the row-size distribution (if a given block happens to hold wider-than-average rows, the count is lower than usual, etc.); dead tuples or FILLFACTOR occupying space in each block; uneven distribution across the table; and general rounding errors. In most cases the estimate from pg_class will be faster and more accurate.
Answer to the actual question: "First, I need to know the number of rows in that table, if the total count is greater than some predefined constant, and whether ... when the count passes my constant value, it stops counting (rather than waiting to finish the count to tell me the row count is greater)." Yes. You can combine a subquery with LIMIT:
SELECT count(*) FROM (SELECT 1 FROM token LIMIT 500000) t;
Postgres actually stops counting beyond the given limit: you get an exact, current count for up to n rows (500000 in this example), and n otherwise. It is not as fast as the estimate from pg_class, though.
nccloud
Query dead tuples
SELECT schemaname, relname, n_live_tup, n_dead_tup,
       ( case when n_live_tup = 0 then 'invalid' else CONCAT(round((n_dead_tup::numeric/n_live_tup::numeric) * 100, 3)::text, '%') end ) AS "dead / live",
       pg_size_pretty(pg_relation_size((schemaname || '.' || relname)::regclass)) AS table_size,
       pg_size_pretty(pg_indexes_size((schemaname || '.' || relname)::regclass)) AS index_size,
       pg_size_pretty(pg_total_relation_size((schemaname || '.' || relname)::regclass)) AS total_size
FROM pg_stat_user_tables
ORDER BY pg_relation_size((schemaname || '.' || relname)::regclass) DESC; -- order by the raw size, not the pretty-printed text
select schemaname, relname, n_live_tup, n_dead_tup, ( case when n_live_tup = 0 then 'invalid' else CONCAT(round((n_dead_tup::numeric/n_live_tup::numeric) * 100, 3)::text, '%') end ) AS "dead / live",
pg_size_pretty(pg_relation_size(relid)) table_size,
pg_size_pretty(pg_indexes_size(relid)) index_size,
pg_size_pretty(pg_total_relation_size(relid)) table_total_size
from pg_stat_user_tables
order by pg_total_relation_size(relid) desc;
nccloud
Last VACUUM / ANALYZE times
select schemaname, relname, n_live_tup, n_dead_tup, ( case when n_live_tup = 0 then 'invalid' else CONCAT(round((n_dead_tup::numeric/n_live_tup::numeric) * 100, 3)::text, '%') end ) AS "dead / live", last_vacuum, last_autovacuum, last_analyze, last_autoanalyze,
pg_size_pretty(pg_relation_size(relid)) table_size,
pg_size_pretty(pg_indexes_size(relid)) index_size,
pg_size_pretty(pg_total_relation_size(relid)) table_total_size
from pg_stat_user_tables
order by pg_total_relation_size(relid) desc;
nccloud
Limit a user's number of connections: ALTER ROLE test CONNECTION LIMIT 1;
nccloud
Read-only restriction; for a production system the following configuration can be used:
REVOKE CONNECT ON DATABASE nova FROM PUBLIC;
GRANT CONNECT ON DATABASE nova TO USER;
--ACCESS SCHEMA
REVOKE ALL ON SCHEMA public FROM PUBLIC;
GRANT USAGE ON SCHEMA public TO USER;
--ACCESS TABLES
REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC ;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO read_only ;
GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO read_write ;
GRANT ALL ON ALL TABLES IN SCHEMA public TO admin ;