2024-07-25
Database move objects
WITH data AS (
SELECT USER owner, segment_name, segment_type, segment_subtype,
SUM(bytes)/1024/1024/1024 size_gb
FROM user_segments s
GROUP BY segment_name, segment_type, segment_subtype
ORDER BY SUM(bytes) DESC
)
)
SELECT
data.*,
CASE segment_type
WHEN 'LOBSEGMENT' THEN
'ALTER TABLE ' || LOWER(l.owner || '.' || l.table_name) || ' MOVE LOB(' || l.column_name || ') STORE AS (TABLESPACE users) ONLINE PARALLEL 16;'
WHEN 'LOB PARTITION' THEN
'ALTER TABLE ' || LOWER (p.table_owner || '.' || p.table_name ) || ' MOVE PARTITION ' || p.partition_name
|| ' LOB ( ' || LOWER ( p.column_name ) || ') STORE AS ( TABLESPACE users ) UPDATE INDEXES ONLINE PARALLEL 16;'
WHEN 'INDEX' THEN
'ALTER INDEX ' || LOWER(i.owner || '.' || i.index_name) || ' REBUILD /*ONLINE*/ PARALLEL 16;' || CHR(10) ||
'ALTER INDEX ' || LOWER(i.owner || '.' || i.index_name) || ' NOPARALLEL;'
WHEN 'TABLE' THEN
'ALTER TABLE ' || LOWER(data.owner || '.' || data.segment_name) || ' MOVE ONLINE PARALLEL 16;'
END cmd
FROM data,
all_lobs l,
all_lob_partitions p,
all_indexes i
WHERE data.owner = l.owner(+)
AND data.segment_name = l.segment_name(+)
AND data.owner = p.table_owner(+)
AND data.segment_name = p.lob_name(+)
AND data.owner = i.owner(+)
AND data.segment_name = i.index_name(+);
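For reference, the generated cmd column produces statements like these (object names are illustrative):
ALTER TABLE scott.orders MOVE LOB(ORDER_XML) STORE AS (TABLESPACE users) ONLINE PARALLEL 16;
ALTER INDEX scott.orders_pk REBUILD /*ONLINE*/ PARALLEL 16;
ALTER INDEX scott.orders_pk NOPARALLEL;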
2022-12-05
Powershell: docker - print tags for an image
$imageName='camunda/camunda-bpm-platform'
$nextLink="https://registry.hub.docker.com/v2/repositories/$imageName/tags"
echo "Receiving: $nextLink"
$res=$null
Do {
$content = curl $nextLink | ConvertFrom-Json
$nextLink = $content.next
#echo $nextLink
$res+= $content.results.name
} While ($nextLink)
echo $res | sort
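Note: in Windows PowerShell, curl is an alias for Invoke-WebRequest (and on PowerShell 7+ it may resolve to curl.exe). A variant with Invoke-RestMethod, which parses the JSON response itself, avoids the ambiguity:
$res = $null
Do {
    $content = Invoke-RestMethod $nextLink   # parses the JSON automatically
    $nextLink = $content.next
    $res += $content.results.name
} While ($nextLink)
$res | sort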
2022-11-25
Spring enum properties possible values
From this reply
RelaxedDataBinder#bind()
RelaxedConversionService#convert()
1. try DefaultConversionService#convert()
# only supports `A_VALUE`
StringToEnumConverterFactory#StringToEnum#convert()
2. then GenericConversionService#convert()
# the config key can be:
# 0 = "a-value"
# 1 = "a_value"
# 2 = "aValue"
# 3 = "avalue"
# 4 = "A-VALUE"
# 5 = "A_VALUE"
# 6 = "AVALUE"
RelaxedConversionService$StringToEnumIgnoringCaseConverterFactory$StringToEnum#convert()
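For illustration, a minimal sketch (the property name and enum are hypothetical):
// with Spring Boot's relaxed binding, every spelling listed above
// (a-value, a_value, aValue, avalue, A-VALUE, A_VALUE, AVALUE) binds to A_VALUE
public enum Mode { A_VALUE }
// application.properties:
// my.app.mode=a-value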
2022-08-26
Oracle 19c: JSON and character sets
Given
Oracle 19c in WE8MSWIN1252 charset.
Processing the JSON in UTF-8 using different functions.
Test
DECLARE
c_json_utf8 VARCHAR2(32000) := CONVERT('{"a":"' || CHR(246) || '"}', 'AL32UTF8');
l_obj json_object_t;
BEGIN
l_obj := NEW json_object_t(c_json_utf8);
dbms_output.put_line('json_object_t.get_string: ' || l_obj.get_string('a'));
dbms_output.put_line('json_value: ' || json_value(c_json_utf8, '$.a'));
FOR rec IN (
SELECT *
FROM JSON_TABLE(c_json_utf8, '$'
COLUMNS(a VARCHAR2 path '$.a')
)
) LOOP
dbms_output.put_line('json_table: ' || rec.a);
END LOOP;
END;
/
Result
json_object_t.get_string: ö
json_value: ö
json_table: ö
All three functions decode the UTF-8 input correctly, even though the database character set is WE8MSWIN1252.
2022-06-14
Extract information about deadlocks from traces
If you don’t have filesystem access, or simply prefer to do everything in SQL, here is a quick-and-dirty way to extract information about ORA-00060 deadlocks from Oracle trace files.
I’m too lazy to write a proper script with variables, so the comments in the code explain each step.
-- Get trace files dir
SELECT VALUE FROM v$diag_info WHERE NAME = 'Diag Trace';
-- create directory object for trace files dir
CREATE DIRECTORY test_trace_dir AS '&VALUE_FROM_PREVIOUS_STEP';
-- file-to-CLOB extractor function, found on Stack Overflow
CREATE OR REPLACE FUNCTION test_trace_read(p_file_name IN VARCHAR2) RETURN CLOB AS
l_clob CLOB;
l_bfile BFILE;
dst_offset NUMBER := 1;
src_offset NUMBER := 1;
lang_ctx NUMBER := dbms_lob.default_lang_ctx;
warning NUMBER;
BEGIN
l_bfile := bfilename('TEST_TRACE_DIR', p_file_name);
dbms_lob.fileopen(l_bfile);
dbms_lob.createtemporary(l_clob, TRUE);
dbms_lob.loadclobfromfile(dest_lob => l_clob,
src_bfile => l_bfile,
amount => dbms_lob.getlength(l_bfile),
dest_offset => dst_offset,
src_offset => src_offset,
bfile_csid => dbms_lob.default_csid,
lang_context => lang_ctx,
warning => warning);
dbms_lob.fileclose(l_bfile);
RETURN l_clob;
END;
/
-- query returning a piece of each trace file
-- the full trace file content can be returned as well
-- the number of rows processed is specified in the &NUM_ROWS variable
WITH traces AS (
SELECT t.originating_timestamp, t.module_id, t.message_text,
regexp_replace(t.message_text, '.*More info in file (.*)\.$', '\1') full_file_name,
regexp_replace(t.message_text, '.*More info in file .*\/(.*)\.\s*$', '\1') file_name -- depends on the message format!
FROM V$DIAG_ALERT_EXT t
WHERE message_text LIKE '%ORA-00060%'
ORDER BY 1 DESC
),
trace_files AS (
SELECT test_trace_read(file_name) CONTENT, t.*
FROM traces t
WHERE ROWNUM <= &NUM_ROWS
)
SELECT SUBSTR(CONTENT, INSTR(CONTENT, 'Deadlock graph:'), INSTR(CONTENT, '----- Call Stack Trace -----') - INSTR(CONTENT, 'Deadlock graph:')) substr_trace,
t.originating_timestamp, t.module_id, t.full_file_name, t.message_text
FROM trace_files t
;
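Cleanup once the analysis is done (assuming the objects were created as above):
DROP FUNCTION test_trace_read;
DROP DIRECTORY test_trace_dir;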
2022-06-08
Powershell: basic listener
Found here
$Listener = [System.Net.Sockets.TcpListener]8080;
$Listener.Start();
$Listener.AcceptSocket();
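AcceptSocket() blocks until a client connects. To test from another console and then release the port:
Test-NetConnection -ComputerName localhost -Port 8080
$Listener.Stop()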
2022-05-09
Jenkins: print password
stage('Sandbox') {
withCredentials([usernamePassword(credentialsId: 'TestUser', passwordVariable: 'C_PASS', usernameVariable: 'C_USER')]) {
creds = "\nUser: ${C_USER}\nPassword: ${C_PASS}\n"
}
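// println is placed outside the withCredentials block, so Jenkins does not mask the secret in the console output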
println creds
}
2022-05-02
GIT: Run the changes from current branch
Run the difference between the current feature branch and the target branch (configured as dev), into which feature branches are merged in the current dev process.
The $file_list array is built from the files:
- from the merge commit, if the feature branch is already merged
- from the fork-point, if the branch is not merged
In my scenario all the files are run in SQL*Plus, but the $file_list variable can be used for any purpose.
# Run changes made in the current branch compared to $target_branch_name = dev in the database given as the first parameter
# If the changes are already merged, merge-base --fork-point produces wrong results, so git log -m -1 --first-parent on the merge commit is used to find the difference
# If no merge commit is present, merge-base --fork-point is used to build the difference
$target_branch_name = 'dev'
$repo = 'C:\code\myrepo'
cd $repo
$dbname=$args[0]
$script = "connect $dbname"
$merge_commit = git log HEAD..$target_branch_name --ancestry-path --merges --reverse --pretty=format:%h | Select-Object -First 1
if ( $merge_commit -eq $null )
{
'...No subsequent merges...'
$file_list = (git diff --name-only $(git merge-base --fork-point $target_branch_name))
} else {
"Merge commit found $merge_commit"
$file_list = git log -m -1 --first-parent --name-only --pretty="format:" $merge_commit
}
$file_list
# process the file list in SQL*Plus
$file_list |
ForEach {
$fullname = "@$repo$_"
$script += "PROMPT Running $fullname`n"
$script += "@$fullname`n"
}
$script += 'EXIT'
echo $script
echo $script | sqlplus /nolog
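Assuming the script is saved as run_changes.ps1 (the name is illustrative), pass the connect string as the first argument:
.\run_changes.ps1 scott/tiger@dev_db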
2022-03-24
Print JSON attributes names and types
A script to traverse a JSON document, print the attribute types (except those with null values), and replace the values with the attribute names (useful for testing purposes)
DECLARE
l_clob CLOB; -- assign the JSON document to inspect here
l_json json_object_t;
l_number NUMBER := 1;
FUNCTION process_object(p_json json_object_t, p_level NUMBER := 0, p_parent_object_name VARCHAR2 := NULL) RETURN json_object_t IS
l_keys JSON_KEY_LIST;
l_padding VARCHAR2(100):= LPAD(' ', p_level * 3, ' ');
l_result json_object_t := p_json;
BEGIN
l_keys := p_json.get_keys;
FOR i IN 1 .. l_keys.count LOOP
IF p_json.get(l_keys(i)).is_object THEN
dbms_output.put_line(l_padding || l_keys(i) || ' is object');
l_result.put(
l_keys(i),
process_object(TREAT(p_json.get(l_keys(i)) AS json_object_t), p_level + 1, LTRIM(p_parent_object_name || '.' || l_keys(i), '.'))
);
ELSIF p_json.get(l_keys(i)).is_number THEN
dbms_output.put_line(l_padding || l_keys(i) || ' is number');
l_result.put(l_keys(i), l_number);
l_number := l_number + 1;
--
ELSIF p_json.get(l_keys(i)).is_string THEN
dbms_output.put_line(l_padding || l_keys(i) || ' is string');
l_result.put(l_keys(i), LTRIM(p_parent_object_name || '_', '_') || l_keys(i));
ELSIF p_json.get(l_keys(i)).is_array THEN
dbms_output.put_line(l_padding || l_keys(i) || ' is array');
DECLARE
l_arr JSON_ARRAY_T := TREAT(p_json.get(l_keys(i)) AS JSON_ARRAY_T);
l_new_array JSON_ARRAY_T := NEW JSON_ARRAY_T;
BEGIN
FOR j IN 0 .. l_arr.get_size - 1 LOOP
IF l_arr.get(j).is_object THEN
l_new_array.append(
process_object(TREAT(l_arr.get(j) AS json_object_t), p_level + 1, LTRIM(p_parent_object_name || '.' || l_keys(i) || '[' || j || ']', '.'))
);
END IF;
-- check other types?
END LOOP;
l_result.put(l_keys(i), l_new_array);
END;
ELSE
dbms_output.put_line(l_padding || l_keys(i) || ' is unknown');
l_result.put(l_keys(i), LTRIM(p_parent_object_name || '_', '_') || l_keys(i));
END IF;
END LOOP;
RETURN l_result;
END;
BEGIN
l_json := json_object_t.parse(l_clob);
dbms_output.put_line( process_object(l_json).stringify );
END;
/
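For illustration, with a hypothetical document assigned to l_clob the run looks like this:
-- l_clob := '{"a":{"b":1,"c":"x"}}';
-- output:
-- a is object
--    b is number
--    c is string
-- result: {"a":{"b":1,"c":"a_c"}}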
2021-12-17
Generate queries for PK-FK data check
Given:
- table to be inspected: l_owner, l_table_name
- row condition in this table: l_table_condition
Result: generated queries to extract parent rows ("Reference to" section) and child rows ("Referenced by" section)
DECLARE
l_owner VARCHAR2(30) := UPPER('schema_name');
l_table_name VARCHAR2(30) := UPPER('table_name');
l_table_condition VARCHAR2(32767) := 't.id = 133';
l_chi_cols VARCHAR2(32767);
l_par_cols VARCHAR2(32767);
FUNCTION get_constraint_columns(p_cons_owner VARCHAR2, p_cons_name VARCHAR2) RETURN VARCHAR2 IS
l_result VARCHAR2(32767);
BEGIN
SELECT LISTAGG(cols.column_name, ', ') WITHIN GROUP (ORDER BY cols.position)
INTO l_result
FROM dba_cons_columns cols
WHERE cols.owner = p_cons_owner
AND cols.constraint_name = p_cons_name;
RETURN l_result;
END;
BEGIN
dbms_output.put_line('-- Reference to');
dbms_output.put_line(LPAD('-', 10, '-'));
FOR rec IN (
SELECT c.owner chi_owner, c.table_name chi_table_name, c.constraint_name chi_constraint_name
, p.owner par_owner, p.table_name par_table_name, p.constraint_name par_constraint_name
FROM dba_constraints c,
dba_constraints p
WHERE c.owner = l_owner
AND c.table_name = l_table_name
AND c.constraint_type = 'R'
AND p.owner = c.r_owner
AND p.constraint_name = c.r_constraint_name
) LOOP
l_chi_cols := get_constraint_columns(rec.chi_owner, rec.chi_constraint_name);
l_par_cols := get_constraint_columns(rec.par_owner, rec.par_constraint_name);
dbms_output.new_line;
dbms_output.put_line('-- ' || rec.chi_owner || '.' || rec.chi_table_name || '.' || l_chi_cols || ' -> '
|| rec.par_owner || '.' || rec.par_table_name || '.' || l_par_cols);
dbms_output.put_line(
'SELECT * ' || CHR(10) ||
'FROM ' || rec.par_owner || '.' || rec.par_table_name || ' p' || CHR(10) ||
'WHERE (' || l_par_cols || ') IN (' || CHR(10) ||
' SELECT ' || l_chi_cols || CHR(10) ||
' FROM ' || rec.chi_owner || '.' || rec.chi_table_name || ' t' || CHR(10) ||
' WHERE ' || l_table_condition || CHR(10) ||
');'
);
END LOOP;
dbms_output.put_line('-- Referenced by');
dbms_output.put_line(LPAD('-', 10, '-'));
FOR rec IN (
SELECT c.owner chi_owner, c.table_name chi_table_name, c.constraint_name chi_constraint_name
, p.owner par_owner, p.table_name par_table_name, p.constraint_name par_constraint_name
FROM dba_constraints c,
dba_constraints p
WHERE p.owner = l_owner
AND p.table_name = l_table_name
AND p.constraint_type = 'P'
AND p.owner = c.r_owner
AND p.constraint_name = c.r_constraint_name
) LOOP
l_chi_cols := get_constraint_columns(rec.chi_owner, rec.chi_constraint_name);
l_par_cols := get_constraint_columns(rec.par_owner, rec.par_constraint_name);
dbms_output.new_line;
dbms_output.put_line('-- ' || rec.chi_owner || '.' || rec.chi_table_name || '.' || l_chi_cols || ' -> '
|| rec.par_owner || '.' || rec.par_table_name || '.' || l_par_cols);
dbms_output.put_line(
'SELECT * ' || CHR(10) ||
'FROM ' || rec.chi_owner || '.' || rec.chi_table_name || ' c' || CHR(10) ||
'WHERE (' || l_chi_cols || ') IN (' || CHR(10) ||
' SELECT ' || l_par_cols || CHR(10) ||
' FROM ' || rec.par_owner || '.' || rec.par_table_name || ' t' || CHR(10) ||
' WHERE ' || l_table_condition || CHR(10) ||
');'
);
END LOOP;
END;
/
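For illustration, for a hypothetical foreign key SCHEMA_NAME.TABLE_NAME.PARENT_ID -> SCHEMA_NAME.PARENT_TABLE.ID, the "Reference to" part prints something like:
-- SCHEMA_NAME.TABLE_NAME.PARENT_ID -> SCHEMA_NAME.PARENT_TABLE.ID
SELECT *
FROM SCHEMA_NAME.PARENT_TABLE p
WHERE (ID) IN (
 SELECT PARENT_ID
 FROM SCHEMA_NAME.TABLE_NAME t
 WHERE t.id = 133
);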
2021-11-24
DB Link creation with direct insert
INSERT INTO sys.link$ (owner#, name, ctime, host, userid, password, flag, authusr, authpwd, passwordx, authpwdx)
SELECT user_id
, 'DB_LINK_NAME'
, SYSDATE
, 'CONNECTION_STRING'
, 'TARGET_DATABASE_USERNAME'
, NULL, 2, NULL, NULL
, 'hashed password can be extracted from sys.link$.PASSWORDX'
, NULL
FROM dba_users
WHERE username = 'LINK_OWNER' -- DB link owner
;
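The insert still has to be committed, and the dictionary cache may hold stale data afterwards; flushing the shared pool is commonly suggested (my assumption - verify on a test instance first):
COMMIT;
ALTER SYSTEM FLUSH SHARED_POOL; -- may be needed before the new link becomes visible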
2021-11-08
Async using dbms_scheduler package
Simple async processing in Oracle can be implemented with the dbms_scheduler package:
-- dbms_scheduler.create_job makes explicit commit!!!
dbms_scheduler.create_job(
job_name => 'async_execution' || seq_async.nextval(),
job_type => 'PLSQL_BLOCK',
job_action => 'BEGIN some_async_proc(); END;',
start_date => sysdate,
enabled => TRUE,
auto_drop => TRUE,
comments => 'Async procedure call'
);
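A minimal sketch to keep that implicit commit away from the caller's transaction (procedure and sequence names are illustrative):
CREATE OR REPLACE PROCEDURE run_async(p_proc VARCHAR2) IS
  PRAGMA AUTONOMOUS_TRANSACTION;
BEGIN
  -- create_job commits, but only inside this autonomous transaction
  dbms_scheduler.create_job(
    job_name   => 'async_execution' || seq_async.nextval(),
    job_type   => 'PLSQL_BLOCK',
    job_action => 'BEGIN ' || p_proc || '; END;',
    start_date => sysdate,
    enabled    => TRUE,
    auto_drop  => TRUE
  );
END;
/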
An alternative is an implementation using dbms_aq queues, but in my experience Oracle's queues occasionally start raising unexpected exceptions. On the other hand, with the dbms_scheduler solution it is impossible to guarantee execution order.
2021-10-29
All privileges for all users
-- You can filter the results in the last lines of the query
WITH all_user_roles AS (
SELECT DISTINCT connect_by_root(grantee) username, granted_role, sys_connect_by_path(granted_role, '->') PATH, admin_option
FROM dba_role_privs p
START WITH grantee IN (SELECT username FROM dba_users)
CONNECT BY PRIOR granted_role = grantee
)
--SELECT * FROM all_user_roles;
, grantee AS (
SELECT username , granted_role, PATH FROM all_user_roles
UNION
SELECT username, NULL granted_role, 'DIRECT' AS PATH FROM dba_users
)
--SELECT * FROM grantee;
, priv_list AS (
SELECT 'ROLE' priv_type, username, granted_role priv, NULL AS owner, NULL AS table_name, NULL AS column_name, admin_option grantable, PATH
FROM all_user_roles
UNION
SELECT 'SYSTEM' priv_type, username, privilege priv, NULL AS owner, NULL AS table_name, NULL AS column_name, admin_option, PATH
FROM dba_sys_privs, grantee
WHERE grantee = grantee.username
UNION
SELECT 'TABLE' priv_type, username, PRIVILEGE, owner, table_name, NULL AS column_name, grantable, PATH
FROM dba_tab_privs, grantee
WHERE grantee = grantee.username
UNION
SELECT 'COLUMN' priv_type, username, PRIVILEGE, owner, table_name, column_name, grantable, PATH
FROM dba_col_privs, grantee
WHERE grantee = grantee.username)
SELECT *
FROM priv_list
WHERE priv = 'ALTER USER'
;
2021-06-21
GIT housekeeping
Remove local branches no longer present on the remote; script from Stack Overflow:
git checkout master; git remote update origin --prune; git branch -vv | Select-String -Pattern ": gone]" | % { $_.toString().Trim().Split(" ")[0]} | % {git branch -d $_}
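Note that git branch -d refuses to delete branches it considers unmerged; swap in -D to force:
git branch -vv | Select-String -Pattern ": gone]" | % { $_.toString().Trim().Split(" ")[0]} | % {git branch -D $_}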
2021-05-10
CONVERT for UTF-8
In a database with the WE8MSWIN1252 character set, the CONVERT function with a CLOB input parameter does not convert to UTF-8; the result looks like UTF-16. Conversion to the database character set works fine.
Test script
host chcp 65001
select CONVERT(CHR(246), 'AL32UTF8') from dual;
select CONVERT(TO_CLOB(CHR(246)), 'AL32UTF8') from dual;
select dump(to_char(CONVERT(TO_CLOB(CHR(246)), 'AL32UTF8'))) clob_convert, dump(CONVERT(CHR(246), 'AL32UTF8')) vc_convert
from dual;
host chcp 1252
select CONVERT(CHR(195) || CHR(182) , 'WE8MSWIN1252', 'AL32UTF8') from dual;
select CONVERT(TO_CLOB(CHR(195) || CHR(182)), 'WE8MSWIN1252', 'AL32UTF8') from dual;
Results
SQL> host chcp 65001
Active code page: 65001
SQL> select CONVERT(CHR(246), 'AL32UTF8') from dual;
ö
SQL> select CONVERT(TO_CLOB(CHR(246)), 'AL32UTF8') from dual;
SQL> select dump(to_char(CONVERT(TO_CLOB(CHR(246)), 'AL32UTF8', 'WE8MSWIN1252'))) clob_convert, dump(CONVERT(CHR(246), 'AL32UTF8')) vc_convert
2 from dual;
Typ=1 Len=2: 0,246 <--- UTF-16????
Typ=1 Len=2: 195,182
SQL> host chcp 1252
Active code page: 1252
SQL> select CONVERT(CHR(195) || CHR(182) , 'WE8MSWIN1252', 'AL32UTF8') from dual;
ö
SQL> select CONVERT(TO_CLOB(CHR(195) || CHR(182)), 'WE8MSWIN1252', 'AL32UTF8') from dual;
ö
Quote from Oracle docs
Note: Oracle discourages the use of the CONVERT function in the current Oracle Database release. The return value of CONVERT has a character data type, so it should be either in the database character set or in the national character set, depending on the data type. Any dest_char_set that is not one of these two character sets is unsupported. The char argument and the source_char_set have the same requirements. Therefore, the only practical use of the function is to correct data that has been stored in a wrong character set.
Values that are in neither the database nor the national character set should be processed and stored as RAW or BLOB. Procedures in the PL/SQL packages UTL_RAW and UTL_I18N—for example, UTL_RAW.CONVERT—allow limited processing of such values. Procedures accepting a RAW argument in the packages UTL_FILE, UTL_TCP, UTL_HTTP, and UTL_SMTP can be used to output the processed data.
2021-04-08
Jenkins: print script executed by sh
Add one of the following inside the job's shell script.
- Print to trace
find /var/jenkins_home/jobs/My_Job/workspace@tmp/ \
-name 'script.sh' \
-exec cat '{}' \;
- Or copy whole folder
cp -r /var/jenkins_home/jobs/My_Job/workspace@tmp/ ~/tmp
Adjust the root folder in the commands to your installation.
2021-04-07
Character set conversion
Based on this
db1>SELECT value FROM nls_database_parameters WHERE parameter='NLS_CHARACTERSET';
VALUE
----------------------------------------------------------------
WE8MSWIN1252
To return the value in the national character set of the database, use the UNISTR function:
db1>SELECT dump(UNISTR('\00E4'), 1016), UNISTR('\00E4') FROM DUAL;
DUMP(UNISTR('\00E4'),1016) U
---------------------------------------- -
Typ=1 Len=2 CharacterSet=AL16UTF16: 0,e4 ä
To return the value in the database character set, use TO_CHAR:
db1>SELECT DUMP(TO_CHAR(UNISTR('\00E4')), 1016), TO_CHAR(UNISTR('\00E4')) FROM DUAL;
DUMP(TO_CHAR(UNISTR('\00E4')),1016) T
----------------------------------------- -
Typ=1 Len=1 CharacterSet=WE8MSWIN1252: e4 ä
If characters are not shown correctly in SQL*Plus, use chcp and NLS_LANG as explained here:
db1>host chcp
Active code page: 1252
db1>host echo %NLS_LANG%
.WE8MSWIN1252
2021-04-02
Jenkins: fire on change of specific file in specific branch in Bitbucket
Perhaps a different set of plugins could accomplish this task, but I’ve implemented it with the Generic Webhook Trigger plugin.
The final version is for Bitbucket’s "Pull request merged" webhook, but it can also be adapted to the "Repository push" webhook.
In Bitbucket, a webhook is configured that calls Jenkins.
In Jenkins, the build trigger is Generic Webhook Trigger.
Post content parameters:
Variable | Expression | Comment
---|---|---
merge_commit_hash | $.pullRequest.properties.mergeCommit.id | For pull request merge
pull_request_branch | $.pullRequest.toRef.id | For pull request merge
commit_hash | $.changes[0].toHash | For repository push
push_branch | $.changes[0].refId | For repository push
Filter in Jenkins’ Generic Webhook Trigger to process only changes in a specific branch:
Expression | Text
---|---
refs/heads/dev | $push_branch$pull_request_branch
Example of a simple Execute shell step:
#!/bin/bash
echo "Looking for $FILE_NAME in Commit $commit_hash $merge_commit_hash"
# list of changed files for a commit
#filelist=$(git diff-tree --no-commit-id --name-only -r $commit_hash)
# the commit is a merge commit - must use a different tactic
filelist=$(git log -m -1 --name-only --pretty="format:" $merge_commit_hash)
if [[ $filelist == *"$FILE_NAME"* ]]; then
# process the file here
else
echo "Skip"
fi
2021-03-08
Database CI pipeline idea
- Prod release
- Remote online clone of the Prod database using this technique. See the link above for the list of restrictions; the most significant are: the source database must be in archivelog mode and have local undo (otherwise it must be read-only), and source and target must have the same endianness. A database link must exist from target to source.
- Keep the created clone as the golden source. Use PDB snapshot copy to create databases for testing (see the sketch below):
– the parameter clonedb=true should be set
– the filesystem should support sparse files (ext4, NTFS, … are ok)
– since sparse files are used, the whole process is very fast and doesn’t consume much space
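A sketch of the two cloning steps in SQL (PDB and database link names are illustrative):
-- remote online clone of Prod into the golden source PDB
CREATE PLUGGABLE DATABASE golden FROM prod_pdb@prod_link;
-- cheap sparse test copies from the golden source (requires clonedb=true and a sparse-file-capable filesystem)
CREATE PLUGGABLE DATABASE test1 FROM golden SNAPSHOT COPY;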