2020-05-22 21:53:51 +00:00
parent 92b0892eb3
commit a7132d6ec3
3 changed files with 126 additions and 93 deletions

.vscode/launch.json

@@ -50,7 +50,7 @@
"AYANOVA_FOLDER_USER_FILES": "c:\\temp\\RavenTestData\\userfiles",
"AYANOVA_FOLDER_BACKUP_FILES": "c:\\temp\\RavenTestData\\backupfiles",
"AYANOVA_METRICS_USE_INFLUXDB": "false",
"AYANOVA_SERVER_TEST_MODE":"false",
"AYANOVA_SERVER_TEST_MODE":"true",
"AYANOVA_SERVER_TEST_MODE_SEEDLEVEL":"small",
"AYANOVA_SERVER_TEST_MODE_TZ_OFFSET":"-7",
"AYANOVA_BACKUP_PG_DUMP_PATH":"C:\\data\\code\\PostgreSQLPortable_12.0\\App\\PgSQL\\bin\\"


@@ -19,8 +19,10 @@ namespace AyaNova.Biz
private static ILogger log = AyaNova.Util.ApplicationLogging.CreateLogger("CoreJobSweeper");
private static DateTime lastSweep = DateTime.MinValue;
private static TimeSpan SWEEP_EVERY_INTERVAL = new TimeSpan(0, 30, 0);
private static TimeSpan SUCCEEDED_JOBS_DELETE_AFTER_THIS_TIMESPAN = new TimeSpan(24, 0, 0);//24 hours
private static TimeSpan SUCCEEDED_JOBS_DELETE_AFTER_THIS_TIMESPAN = new TimeSpan(14, 0, 0, 0);//14 days
private static TimeSpan FAILED_JOBS_DELETE_AFTER_THIS_TIMESPAN = new TimeSpan(14, 0, 0, 0);//14 days (gives people time to notice and look into it)
private static TimeSpan INTERNAL_JOBS_LOGS_DELETE_AFTER_THIS_TIMESPAN = new TimeSpan(14, 0, 0, 0);//14 days
private static TimeSpan RUNNING_JOBS_BECOME_FAILED_AFTER_THIS_TIMESPAN = new TimeSpan(24, 0, 0);//24 hours (time running jobs are allowed to sit in "running" state before considered failed)
////////////////////////////////////////////////////////////////////////////////////////////////
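A quick aside on the retention change above, since the two constructors look alike: new TimeSpan(24, 0, 0) is the (hours, minutes, seconds) overload, while new TimeSpan(14, 0, 0, 0) is (days, hours, minutes, seconds), so succeeded jobs are now kept for 14 days instead of 24 hours. A minimal sketch, not part of the commit:

// Sketch only: the TimeSpan overloads behind the retention change
TimeSpan oldRetention = new TimeSpan(24, 0, 0);    // (hours, minutes, seconds)        => 24 hours
TimeSpan newRetention = new TimeSpan(14, 0, 0, 0); // (days, hours, minutes, seconds)  => 14 days
Console.WriteLine(oldRetention.TotalDays);         // 1
Console.WriteLine(newRetention.TotalDays);         // 14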
@@ -46,12 +48,17 @@ namespace AyaNova.Biz
dtDeleteCutoff = DateTime.UtcNow - FAILED_JOBS_DELETE_AFTER_THIS_TIMESPAN;
await sweepAsync(ct, dtDeleteCutoff, JobStatus.Failed);
//KILL STUCK JOBS
//calculate the deadline after which running jobs are treated as failed
DateTime dtRunningDeadline = DateTime.UtcNow - RUNNING_JOBS_BECOME_FAILED_AFTER_THIS_TIMESPAN;
await killStuckJobsAsync(ct, dtRunningDeadline);
//SWEEP INTERNAL JOB LOG
//calculate cutoff to delete
dtDeleteCutoff = DateTime.UtcNow - INTERNAL_JOBS_LOGS_DELETE_AFTER_THIS_TIMESPAN;
await SweepInternalJobsLogsAsync(ct, dtDeleteCutoff);
lastSweep = DateTime.UtcNow;
}
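killStuckJobsAsync itself is not shown in this hunk; purely as a sketch of what the deadline parameter feeds into, assuming an OpsJob set and a Running status exist alongside the Failed status used above (the property names are guesses, not the actual implementation):

// Hypothetical sketch of killStuckJobsAsync, not the real method body
private static async Task killStuckJobsAsync(AyContext ct, DateTime dtRunningDeadline)
{
    // Jobs still marked Running that started before the deadline are assumed stuck
    var stuck = await ct.OpsJob
        .Where(z => z.JobStatus == JobStatus.Running && z.Created < dtRunningDeadline)
        .ToListAsync();
    foreach (var job in stuck)
    {
        job.JobStatus = JobStatus.Failed;
        log.LogWarning($"Job {job.GId} exceeded the running deadline and was marked failed");
    }
    await ct.SaveChangesAsync();
}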
@@ -111,6 +118,32 @@ namespace AyaNova.Biz
}
private static async Task SweepInternalJobsLogsAsync(AyContext ct, DateTime dtDeleteCutoff)
{
//Get the deletable list (this is for reporting; it could easily be done in one go)
var logs = await ct.OpsJobLog
.AsNoTracking()
.Where(z => z.Created < dtDeleteCutoff)
.OrderBy(z => z.Created)
.ToListAsync();
log.LogTrace($"SweepInternalJobsLogsAsync processing: cutoff={dtDeleteCutoff}, for {logs.Count} log entries");
foreach (OpsJobLog l in logs)
{
try
{
await ct.Database.ExecuteSqlInterpolatedAsync($"delete from aopsjoblog where gid = {l.GId}");
}
catch (Exception ex)
{
log.LogError(ex, "SweepInternalJobsLogsAsync exception removing old log entries");
throw;
}
}
}
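As the comment at the top of the method notes, the per-row loop is mainly there so the count can be logged; the "one go" variant would be a single ranged delete through the same API (a sketch, not in the commit; ExecuteSqlInterpolatedAsync returns the number of rows affected):

// Hypothetical single-statement variant of the sweep above
int removed = await ct.Database.ExecuteSqlInterpolatedAsync(
    $"delete from aopsjoblog where created < {dtDeleteCutoff}");
log.LogTrace($"SweepInternalJobsLogsAsync removed {removed} log entries older than {dtDeleteCutoff}");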
/////////////////////////////////////////////////////////////////////
}//eoc


@@ -23,7 +23,7 @@ namespace AyaNova.Util
private const int DESIRED_SCHEMA_LEVEL = 11;
internal const long EXPECTED_COLUMN_COUNT = 310;
internal const long EXPECTED_INDEX_COUNT = 134;
internal const long EXPECTED_INDEX_COUNT = 133;
//!!!!WARNING: BE SURE TO UPDATE THE DbUtil::EmptyBizDataFromDatabaseForSeedingOrImporting WHEN NEW TABLES ARE ADDED!!!!
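For context, the expected column and index counts above are presumably sanity-checked against the live database; the counts themselves can be read from the standard PostgreSQL catalogs. A sketch, assuming a scalar-query helper named ExecScalarAsync that this file is not shown to have:

// Hypothetical verification sketch; the two catalog queries are standard PostgreSQL
long columnCount = await ExecScalarAsync(
    "SELECT count(*) FROM information_schema.columns WHERE table_schema = 'public'");
long indexCount = await ExecScalarAsync(
    "SELECT count(*) FROM pg_indexes WHERE schemaname = 'public'");
if (columnCount != EXPECTED_COLUMN_COUNT || indexCount != EXPECTED_INDEX_COUNT)
    throw new Exception($"Schema drift: {columnCount} columns / {indexCount} indexes, expected {EXPECTED_COLUMN_COUNT} / {EXPECTED_INDEX_COUNT}");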
@@ -48,89 +48,89 @@ namespace AyaNova.Util
*/
#region unused index query
/*
COPY taken 2020-05-21 from link above "jerkus" :)
WITH table_scans as (
SELECT relid,
tables.idx_scan + tables.seq_scan as all_scans,
( tables.n_tup_ins + tables.n_tup_upd + tables.n_tup_del ) as writes,
pg_relation_size(relid) as table_size
FROM pg_stat_user_tables as tables
),
all_writes as (
SELECT sum(writes) as total_writes
FROM table_scans
),
indexes as (
SELECT idx_stat.relid, idx_stat.indexrelid,
idx_stat.schemaname, idx_stat.relname as tablename,
idx_stat.indexrelname as indexname,
idx_stat.idx_scan,
pg_relation_size(idx_stat.indexrelid) as index_bytes,
indexdef ~* 'USING btree' AS idx_is_btree
FROM pg_stat_user_indexes as idx_stat
JOIN pg_index
USING (indexrelid)
JOIN pg_indexes as indexes
ON idx_stat.schemaname = indexes.schemaname
AND idx_stat.relname = indexes.tablename
AND idx_stat.indexrelname = indexes.indexname
WHERE pg_index.indisunique = FALSE
),
index_ratios AS (
SELECT schemaname, tablename, indexname,
idx_scan, all_scans,
round(( CASE WHEN all_scans = 0 THEN 0.0::NUMERIC
ELSE idx_scan::NUMERIC/all_scans * 100 END),2) as index_scan_pct,
writes,
round((CASE WHEN writes = 0 THEN idx_scan::NUMERIC ELSE idx_scan::NUMERIC/writes END),2)
as scans_per_write,
pg_size_pretty(index_bytes) as index_size,
pg_size_pretty(table_size) as table_size,
idx_is_btree, index_bytes
FROM indexes
JOIN table_scans
USING (relid)
),
index_groups AS (
SELECT 'Never Used Indexes' as reason, *, 1 as grp
FROM index_ratios
WHERE
idx_scan = 0
and idx_is_btree
UNION ALL
SELECT 'Low Scans, High Writes' as reason, *, 2 as grp
FROM index_ratios
WHERE
scans_per_write <= 1
and index_scan_pct < 10
and idx_scan > 0
and writes > 100
and idx_is_btree
UNION ALL
SELECT 'Seldom Used Large Indexes' as reason, *, 3 as grp
FROM index_ratios
WHERE
index_scan_pct < 5
and scans_per_write > 1
and idx_scan > 0
and idx_is_btree
and index_bytes > 100000000
UNION ALL
SELECT 'High-Write Large Non-Btree' as reason, index_ratios.*, 4 as grp
FROM index_ratios, all_writes
WHERE
( writes::NUMERIC / ( total_writes + 1 ) ) > 0.02
AND NOT idx_is_btree
AND index_bytes > 100000000
ORDER BY grp, index_bytes DESC )
SELECT reason, schemaname, tablename, indexname,
index_scan_pct, scans_per_write, index_size, table_size
FROM index_groups;
*/
#endregion
static int startingSchema = -1;
public static int currentSchema = -1;
@@ -237,7 +237,7 @@ FROM index_groups;
"ayid bigint not null, ayatype integer not null, ayevent integer not null, textra varchar(255))");
//INDEX: Most selective first as there are more unique IDs than unique types
await ExecQueryAsync("CREATE INDEX aevent_typeid_idx ON aevent (ayid, ayatype);");
//TODO: this may be a rarely used index; revisit it down the road
await ExecQueryAsync("CREATE INDEX aevent_userid_idx ON aevent (userid);");
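The TODO above about aevent_userid_idx possibly seeing little use ties back to the unused-index query kept in the comment block earlier in this file; a narrower spot check for a single index can read idx_scan from pg_stat_user_indexes (sketch only, reusing the hypothetical ExecScalarAsync helper from the earlier sketch):

// Hypothetical: how often has aevent_userid_idx actually been scanned?
long scans = await ExecScalarAsync(
    "SELECT idx_scan FROM pg_stat_user_indexes WHERE indexrelname = 'aevent_userid_idx'");
if (scans == 0)
    Console.WriteLine("aevent_userid_idx has never been used since the statistics were last reset");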
@@ -261,7 +261,7 @@ FROM index_groups;
await ExecQueryAsync("CREATE INDEX asearchkey_wordid_otype_idx ON asearchkey (wordid, objecttype);");
//Search indexing stored procedure
await ExecQueryAsync(@"
CREATE OR REPLACE PROCEDURE public.aydosearchindex(
wordlist text[],
ayobjectid bigint,
@@ -301,10 +301,10 @@ $BODY$;
//create translation text tables
await ExecQueryAsync("CREATE TABLE atranslation (id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, name varchar(255) not null, stock bool, cjkindex bool default false)");
await ExecQueryAsync("CREATE TABLE atranslationitem (id BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, translationid bigint not null REFERENCES atranslation (id), key text not null, display text not null)");
//This is not a well-used index; not sure what its point is
// await ExecQueryAsync("CREATE INDEX atranslationitem_translationid_key_display_idx ON atranslationitem (translationid,key, display)");
//Load the default TRANSLATIONS
await AyaNova.Biz.PrimeData.PrimeTranslations();
@@ -402,8 +402,8 @@ $BODY$;
await ExecQueryAsync("CREATE TABLE aopsjob (gid uuid PRIMARY KEY, name text not null, created timestamp not null, exclusive bool not null, " +
"startafter timestamp not null, jobtype integer not null, subtype integer null, objectid bigint null, objecttype integer null, jobstatus integer not null, jobinfo text null)");
await ExecQueryAsync("CREATE TABLE aopsjoblog (gid uuid PRIMARY KEY, jobid uuid not null REFERENCES aopsjob (gid), created timestamp not null, statustext text not null)");
await ExecQueryAsync("CREATE TABLE aopsjoblog (gid uuid PRIMARY KEY, jobid uuid not null, created timestamp not null, statustext text not null)");
// REFERENCES aopsjob (gid) removed to allow internal job logs with Guid.Empty and no parent job, as there seems to be no need for the constraint anyway
await SetSchemaLevelAsync(++currentSchema);
}
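On the REFERENCES removal above: internal job log rows carry Guid.Empty as their jobid with no parent row in aopsjob, which the old foreign key would have rejected. A sketch of such an insert (the values are made up; the call style mirrors the sweeper hunk earlier in this commit):

// Hypothetical internal log entry with no parent job; the dropped FK would have blocked this
string note = "internal job log entry";
await ct.Database.ExecuteSqlInterpolatedAsync(
    $"insert into aopsjoblog (gid, jobid, created, statustext) values ({Guid.NewGuid()}, {Guid.Empty}, {DateTime.UtcNow}, {note})");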