2018-10-05 00:13:57 +00:00
parent fc8ea40029
commit 11fe7dce59
4 changed files with 65 additions and 20 deletions

View File

@@ -1,5 +1,10 @@
PERFORMANCE SPECS AND USEFUL INFO
//this is handy:
http://okigiveup.net/what-postgresql-tells-you-about-its-performance/
Useful queries that show how indexes are being used in PostgreSQL.
This is a test query I used for the widget and name fetching performance analysis:
@@ -16,19 +21,21 @@ select * from pg_stat_user_indexes
Reveals Unused indices
=-=-=-=-=-=-=-=-=-=-=-
SELECT
relid::regclass AS table,
indexrelid::regclass AS index,
pg_size_pretty(pg_relation_size(indexrelid::regclass)) AS index_size,
idx_tup_read,
idx_tup_fetch,
idx_scan
FROM
pg_stat_user_indexes
JOIN pg_index USING (indexrelid)
WHERE
idx_scan = 0
AND indisunique IS FALSE;
SELECT * FROM pg_stat_user_indexes order by idx_scan asc
The basic technique is to look at pg_stat_user_indexes and look for ones where idx_scan,
the count of how many times that index has been used to answer queries, is zero, or at least very low.
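A minimal per-table drill-down sketch (the table name is simply the one examined later in these notes): list every index on a single table, least-used first.
SELECT indexrelname, idx_scan, idx_tup_read, idx_tup_fetch
FROM pg_stat_user_indexes
WHERE relname = 'alocaleitem'
ORDER BY idx_scan ASC;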
Detecting missing indexes
=-=-=-=-=-=-=-=-=-=-=-=-=
SELECT relname, seq_scan, seq_tup_read,
idx_scan, idx_tup_fetch,
seq_tup_read / seq_scan
FROM pg_stat_user_tables
WHERE seq_scan > 0
ORDER BY seq_tup_read DESC;
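A possible variant (the 1000-row cutoff is my assumption, not from these notes): only flag tables where a sequential scan reads many rows on average, since seq scans over tiny tables are harmless.
SELECT relname, seq_scan, seq_tup_read,
       seq_tup_read / seq_scan AS avg_tup_per_seq_scan
FROM pg_stat_user_tables
WHERE seq_scan > 0
  AND seq_tup_read / seq_scan > 1000
ORDER BY avg_tup_per_seq_scan DESC;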
Shows info on all indices
@@ -103,4 +110,34 @@ I'm going to enact a policy to index id,name in all objects that have many colum
OK, it just makes logical sense to keep the indexes even if they are slightly slower; I can revisit this later, since the difference is minuscule. I suspect that with a bigger database there would definitely be better performance.
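A sketch of what that per-object id,name policy could look like (the table name awidget is hypothetical, just following the a-prefix naming used elsewhere in the schema):
CREATE INDEX awidget_id_name_idx ON awidget (id, name);
Presumably list/picklist queries that only need id and name could then be answered from the index alone.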
=-=-=-=-=-=-=-=-=-=-=-=-
=-=-=-=-=-=-=-=-=-=-=-=-
LOCALE ITEM
Before running multiple subset fetches. (The index rows below are pg_stat_user_indexes columns: relid, indexrelid, schemaname, relname, indexrelname, idx_scan, idx_tup_read, idx_tup_fetch; the shorter rows are the missing-index query output: relname, seq_scan, seq_tup_read, idx_scan, idx_tup_fetch, seq_tup_read/seq_scan.)
256287 256301 "public" "alocaleitem" "alocaleitem_localeid_key_idx" "2350" "13667" "5172"
256287 256302 "public" "alocaleitem" "alocaleitem_localeid_key_display_idx" "2618" "2618" "2618"
256287 256294 "public" "alocaleitem" "alocaleitem_pkey" "7085" "7090" "7085"
"alocaleitem" "24" "133010" "12053" "14875" "5542"
After running the fetch-subset test over and over:
256287 256301 "public" "alocaleitem" "alocaleitem_localeid_key_idx" "2398" "13715" "5220"
256287 256302 "public" "alocaleitem" "alocaleitem_localeid_key_display_idx" "2618" "2618" "2618"
256287 256294 "public" "alocaleitem" "alocaleitem_pkey" "7085" "7090" "7085"
seq_tup_read is unchanged while idx_scan and idx_tup_fetch keep climbing:
"alocaleitem" "24" "133010" "12073" "14895" "5542"
After more runs:
"alocaleitem" "24" "133010" "12141" "14963" "5542"
256287 256301 "public" "alocaleitem" "alocaleitem_localeid_key_idx" "2438" "13755" "5260"
256287 256302 "public" "alocaleitem" "alocaleitem_localeid_key_display_idx" "2618" "2618" "2618"
256287 256294 "public" "alocaleitem" "alocaleitem_pkey" "7085" "7090" "7085"
Fresh regen: removed alocaleitem_localeid_key_idx, kept alocaleitem_localeid_key_display_idx.
Before any test runs fetching subsets:
256514 256521 "public" "alocaleitem" "alocaleitem_pkey" "0" "0" "0"
256514 256528 "public" "alocaleitem" "alocaleitem_localeid_key_display_idx" "1667" "1667" "1667"
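One way to get clean before/after counters between these runs (my suggestion, not something these notes do): reset the statistics collector so every counter starts from zero.
SELECT pg_stat_reset();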

View File

@@ -29,6 +29,9 @@ IMMEDIATE ITEMS:
================
- Schema: clean up all the LOOKAT items and verify the indexes are being used
- Run many tests then run this query: SELECT * FROM pg_stat_user_indexes order by idx_scan asc
- Look for any indexes that are not a primary key (their names end in _idx) and show zero idx_scans; those are redundant (see the sketch after this list)
- The rest is Greek to me and not in a good way.
- EventLogProcessor.AddEntry: CHANGE this to save the context itself and then change all callers to handle that (remove save)
- I originally didn't have the save in there because I thought subsequent code might all share in the single context save,
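For the schema/index item above, a sketch of the redundancy check (the LIKE pattern is my guess at "name ends in _idx"):
SELECT relname, indexrelname, idx_scan
FROM pg_stat_user_indexes
WHERE indexrelname LIKE '%\_idx'
  AND idx_scan = 0
ORDER BY relname;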

View File

@@ -363,7 +363,7 @@ namespace AyaNova
// ******************** TESTING WIPE DB *****************************
//
//Set this to true to wipe the db and reinstall a trial license and re-seed the data
var TESTING_REFRESH_DB = false;
var TESTING_REFRESH_DB = true;
#if (DEBUG)
//TESTING

View File

@@ -23,8 +23,8 @@ namespace AyaNova.Util
private const int DESIRED_SCHEMA_LEVEL = 9;
internal const long EXPECTED_COLUMN_COUNT = 99;
internal const long EXPECTED_INDEX_COUNT = 23;
internal const long EXPECTED_INDEX_COUNT = 22;
//!!!!WARNING: BE SURE TO UPDATE THE DbUtil::PrepareDatabaseForSeeding WHEN NEW TABLES ADDED!!!!
/////////////////////////////////////////////////////////////////
@@ -141,15 +141,20 @@ namespace AyaNova.Util
//SEARCH TABLES
exec("CREATE TABLE asearchdictionary (id BIGSERIAL PRIMARY KEY, word varchar(255) not null)");
exec("CREATE UNIQUE INDEX asearchdictionary_word_idx ON asearchdictionary (word);");
exec("CREATE UNIQUE INDEX asearchdictionary_word_idx ON asearchdictionary (word);");
exec("CREATE TABLE asearchkey (id BIGSERIAL PRIMARY KEY, wordid bigint not null REFERENCES asearchdictionary (id), objectid bigint not null, objecttype integer not null, inname bool not null)");
//create locale text tables
exec("CREATE TABLE alocale (id BIGSERIAL PRIMARY KEY, ownerid bigint not null, name varchar(255) not null, stock bool, cjkindex bool default false)");
exec("CREATE UNIQUE INDEX alocale_name_idx ON alocale (name)");
//LOOKAT: I don't think this is doing anything:
//exec("CREATE UNIQUE INDEX alocale_name_idx ON alocale (name)");
exec("CREATE TABLE alocaleitem (id BIGSERIAL PRIMARY KEY, localeid bigint not null REFERENCES alocale (id), key text not null, display text not null)");
exec("CREATE INDEX alocaleitem_localeid_key_idx ON alocaleitem (localeid,key)");
//LOOKAT: this is for what exactly??
// exec("CREATE INDEX alocaleitem_localeid_key_idx ON alocaleitem (localeid,key)");
//This seems more appropriate
exec("CREATE INDEX alocaleitem_localeid_key_display_idx ON alocaleitem (localeid,key, display)");
//Load the default LOCALES
AyaNova.Biz.PrimeData.PrimeLocales(ct);
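A hedged way to verify the index change (the query shape and localeid value are assumptions about the subset fetch, not taken from the commit): run it through EXPLAIN and confirm the planner uses alocaleitem_localeid_key_display_idx, ideally as an index-only scan.
EXPLAIN (ANALYZE, BUFFERS)
SELECT key, display
FROM alocaleitem
WHERE localeid = 1
ORDER BY key;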