65
.vscode/launch.json
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
{
|
||||
// Use IntelliSense to find out which attributes exist for C# debugging
|
||||
// Use hover for the description of the existing attributes
|
||||
// For further information visit https://github.com/OmniSharp/omnisharp-vscode/blob/master/debugger-launchjson.md
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": ".NET Core Launch (web)",
|
||||
"type": "coreclr",
|
||||
"request": "launch",
|
||||
"preLaunchTask": "build",
|
||||
// If you have changed target frameworks, make sure to update the program path.
|
||||
"program": "${workspaceFolder}/server/AyaNova/bin/Debug/netcoreapp2.1/AyaNova.dll",
|
||||
"args": [],
|
||||
"cwd": "${workspaceFolder}/server/AyaNova",
|
||||
"stopAtEntry": false,
|
||||
"internalConsoleOptions": "openOnSessionStart",
|
||||
"launchBrowser": {
|
||||
"enabled": true,
|
||||
"args": "${auto-detect-url}/api/v8/",
|
||||
"windows": {
|
||||
"command": "cmd.exe",
|
||||
"args": "/C start http://localhost:7575/api/v8/"
|
||||
},
|
||||
"osx": {
|
||||
"command": "open"
|
||||
},
|
||||
"linux": {
|
||||
"command": "xdg-open"
|
||||
}
|
||||
},
|
||||
"env": {
|
||||
"ASPNETCORE_ENVIRONMENT": "Development",
|
||||
"AYANOVA_LOG_LEVEL": "Info",
|
||||
"AYANOVA_DEFAULT_LANGUAGE": "de",
|
||||
//"AYANOVA_PERMANENTLY_ERASE_DATABASE": "true",
|
||||
"AYANOVA_DB_CONNECTION": "Server=localhost;Username=postgres;Password=raven;Database=AyaNova;",
|
||||
"AYANOVA_USE_URLS": "http://*:7575;",
|
||||
"AYANOVA_FOLDER_USER_FILES": "c:\\temp\\RavenTestData\\userfiles",
|
||||
"AYANOVA_FOLDER_BACKUP_FILES": "c:\\temp\\RavenTestData\\backupfiles",
|
||||
"AYANOVA_METRICS_USE_INFLUXDB": "false"
|
||||
|
||||
},
|
||||
"sourceFileMap": {
|
||||
"/Views": "${workspaceFolder}/Views"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": ".NET Core Attach",
|
||||
"type": "coreclr",
|
||||
"request": "attach",
|
||||
"processId": "${command:pickProcess}"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
// "AYANOVA_DB_CONNECTION":"Server=localhost;Username=postgres;Password=raven;Database=ayanovadev",
|
||||
//"AYANOVA_LOG_LEVEL": "Info"
|
||||
//"AYANOVA_DB_CONNECTION":"Server=localhost;Username=postgres;Password=raven;Database=AyaNova",
|
||||
|
||||
// "AYANOVA_PERMANENTLY_ERASE_DATABASE": "true",
|
||||
|
||||
//Development system folders
|
||||
//"AYANOVA_FOLDER_USER_FILES": "c:\\temp\\RavenTestData\\userfiles",
|
||||
//"AYANOVA_FOLDER_BACKUP_FILES": "c:\\temp\\RavenTestData\\backupfiles",
|
||||
19
.vscode/tasks.json
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
{
|
||||
"version": "2.0.0",
|
||||
"tasks": [
|
||||
{
|
||||
"label": "build",
|
||||
"command": "dotnet",
|
||||
"type": "process",
|
||||
"args": [
|
||||
"build",
|
||||
"${workspaceFolder}/server/AyaNova/AyaNova.csproj"
|
||||
],
|
||||
"problemMatcher": "$msCompile",
|
||||
"group": {
|
||||
"kind": "build",
|
||||
"isDefault": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
322
devdocs/coding-standards.txt
Normal file
@@ -0,0 +1,322 @@
|
||||
# “Do the simplest thing that will work.”
|
||||
|
||||
|
||||
|
||||
#DISTRIBUTION / DEPLOYMENT
|
||||
- Linux folders to use:
|
||||
- Program files in /opt
|
||||
- Data files in /var/lib
|
||||
- Log files /var/log/
|
||||
|
||||
|
||||
|
||||
|
||||
#PROCESS
|
||||
|
||||
- LEAST PRIVILEGE: Code everything for least privilege possible. The principle of “least privilege” mandates that a process should have the lowest level
|
||||
of privilege needed to accomplish its task
|
||||
- Test / Evaluation data generator developed hand in hand with testing and application code
|
||||
- Data generator is very important, take the time to get it right
|
||||
- Production size and complexity data is required for proper development right from the start
|
||||
- As new modules are added need to add data generation for them as well
|
||||
- Test driven development
|
||||
- Dependency injection
|
||||
- https://joonasw.net/view/aspnet-core-di-deep-dive
|
||||
- Separation of concerns: a Customer object should not have to deal with its persistence at all directly for example.
|
||||
- Rapid release cycles of small features (every 2-4 weeks)
|
||||
- NEVER select *, always specify columns, shouldn't be a problem with EF but remember this, it's for futureproofing and simultaneous versions running in api
|
||||
- AGILE-like PROCESS
|
||||
- (http://jack-vanlightly.com/blog/2017/9/17/you-dont-need-scrum-you-need-agility)
|
||||
- At every decision favor whatever will in future involve the least amount of supervision by me or manual steps or work. Roughly "Agile":
|
||||
- Our highest priority is to satisfy the customer through early and continuous delivery of valuable software.
|
||||
- Welcome changing requirements, even late in development. Agile processes harness change for the customer's competitive advantage.
|
||||
- Deliver working software frequently, from a couple of weeks to a couple of months, with a preference to the shorter timescale.
|
||||
- Business people and developers must work together daily throughout the project.
|
||||
- Build projects around motivated individuals. Give them the environment and support they need, and trust them to get the job done.
|
||||
- The most efficient and effective method of conveying information to and within a development team is face-to-face conversation.
|
||||
- Working software is the primary measure of progress.
|
||||
- Agile processes promote sustainable development. The sponsors, developers, and users should be able to maintain a constant pace indefinitely.
|
||||
- Continuous attention to technical excellence and good design enhances agility.
|
||||
- Simplicity--the art of maximizing the amount of work not done--is essential.
|
||||
- The best architectures, requirements, and designs emerge from self-organizing teams.
|
||||
- At regular intervals, the team reflects on how to become more effective, then tunes and adjusts its behavior accordingly.
|
||||
|
||||
|
||||
|
||||
- BEST PRACTICES
|
||||
- SECURITY (https://docs.microsoft.com/en-us/aspnet/core/security/)
|
||||
- Enforce SSL: https://docs.microsoft.com/en-us/aspnet/core/security/enforcing-ssl
|
||||
- PERFORMANCE
|
||||
- Don't use session state at all if possible. Better to reload a session based on JWT userID from persistent storage.
|
||||
- LEAST PRIVILEGE: DO NOT REQUIRE ROOT ACCESS Code everything for least privilege possible. The principle of “least privilege” mandates that a process should have the lowest level of privilege needed to accomplish its task
|
||||
- CONFIGURATION: have as few sources of configuration as possible, ideally in one place or one copy replicated.
|
||||
- PASSWORDS: passwords to production databases should be kept separate from any other configuration files. They should especially be kept out of the installation directory for the software
|
||||
- SECRETS: should be using Environment variables in dev and production. MS says in dev do differently but that is not ideal for consistency and testing
|
||||
- We do not want to have to set multiple configs in multiple locations and hunt around or worse yet have it in code.
|
||||
- Track configuration changes somewhere for analysis when shit goes south. If a user changed a config need to know when and what was changed.
|
||||
- Configuration should be highly protected, it contains database passwords and other important stuff
|
||||
- Never keep the configuration file in the application folder, it will be overwritten on install or restore and if hacked it may be available to the hacker compromising other things
|
||||
- Name config properties according to their function not their nature. Don't use Hostname, use AuthenticationServer
|
||||
- A UI might be helpful here, one that can also track old versions of config for reference so user can revert or look back.
|
||||
|
||||
- A database failure should not bring down the webapi server, it should be able to handle that gracefully with helpful diagnostic information
|
||||
- Need stress and failure testing early and often, i.e. kill database server - what happens? etc
|
||||
- Need longevity testing, have the system keep testing in the background continuously for a week or more with high and low volume transaction testing
|
||||
- SMALL QUERIES: Favor smaller simpler multiple queries through EF rather than trying to get a whole large graph with many joins at once, i.e. build up the object graph from several small queries rather than one huge one.
|
||||
- (this is my own idea based off issues with things in Ayanova leading to enormous SQL)
|
||||
- Tailor small objects that satisfy what would take a much heavier query to satisfy UI requirements, i.e. a tailored customer list for a specific need would be better than loading all the customer data for the list
|
||||
- ISOLATE reporting and history / audit log functionality from transactional functionality
|
||||
- we don't want to use reporting objects in transactions as they are often ad-hoc slower to query, not as streamlined
|
||||
- Transactions are more important than reporting, don't let reporting hurt transactions
|
||||
- Consider a separate database for storing anything not required to process transactions like history, audit, INACTIVE objects that are archived, some reporting etc
|
||||
- PURGING "A rigorous regimen of data purging is vital to the long-term stability and performance of your system."
|
||||
- a second long term storage db can be used for purged data to keep it out of the active transaction data.
|
||||
- Transaction code should be vanilla ORM sql issued, avoid hand crafted sql in business transaction code as it makes db tuning much harder
|
||||
- RESOURCE POOLING is critical
|
||||
- Be very careful with it
|
||||
- Do not allow callers to block forever. Make sure that any checkout call has a timeout and that the caller knows what to do when it doesn’t get a connection back
|
||||
- Undersized resource pools lead to contention and increased latency. This defeats the purpose of pooling the connections in the first place. Monitor calls to the connection pools to see how long your threads are waiting to check out connections.
|
||||
- CACHING
|
||||
- Needs a memory limit
|
||||
- Monitor hit rates for the cached items to see whether most items are being used from cache
|
||||
- avoid caching things that are cheap to generate
|
||||
- Seldom-used, tiny, or inexpensive objects aren’t worth caching
|
||||
- PRECOMPUTE anything large and relatively static that changes infrequently so it isn't dynamically generated every time
|
||||
- NETWORK (at data center level, these might not apply but making note here)
|
||||
- good network design for the data center partitions the backup traffic onto its own network segment
|
||||
- Have a separate network for administration purposes (like we do with softlayer)
|
||||
- This is actually very important for security and performance, a hacker of the public interface can't access admin functionality if it's bound to another NIC / network
|
||||
- App needs to be configurable which network interface is used for which part so that it's not listening on *all* networks exposing danger to the private network
|
||||
- SLA Even if we never have a service level agreement with our users we should implement it internally so we know if we are living up to it
|
||||
- Good section in the "Release it" book about this, but for now
|
||||
- Have some metrics to watch
|
||||
- Have a service that does synthetic transactions to monitor the live service and log issues.
|
||||
- SLA should be concerned with specific features not as a whole because some functionality is more important than others and can have radically different possible SLA because it may or may not rely on 3rd parties.
|
||||
- AN SLA can only ever be as good as the poorest SLA of anything our service relies on. If an integral component has no SLA then we can't have one either.
|
||||
- LOAD BALANCING: Need it in hosting scenario but it relies on the underlying architecture so this is more in the area of the CONTAINERIZATION Research
|
||||
- DESIGN FOR FAILURE MODES UP FRONT: failures will happen, need to control what happens when parts fail properly
|
||||
- One way to prepare for every possible failure is to look at every external call, every I/O, every use of resources, and every expected outcome and ask, “What are all the ways this can go wrong?”
|
||||
|
||||
- Mock client: if hosting, need an external (not co-located) automated mock client that can detect if the system is down
|
||||
- TIMEOUTS - Always use Timeouts for external resources like database connections, other remote servers network comms etc
|
||||
- Instead of handling timeouts all over the place for similar ops, abstract it into an object (i.e.QueryObject) that has the timeout code in it
|
||||
- Use a generic Gateway to provide the template for connection handling, error handling, query execution, and result processing.
|
||||
- Timeouts have natural synergy with circuit breakers. A circuit breaker can tabulate timeouts, tripping to the “off” state if too many occur.
|
||||
- Fail fast: when a resource times out send an error response immediately and drop that transaction
|
||||
- Fail Fast applies to incoming requests, whereas the Timeouts pattern applies primarily to outbound requests. They’re two sides of the same coin.
|
||||
- DONT JUST TRY A TRANSACTION: Check resource availability at the start of a transaction (check with circuit breakers what their state is) and fail fast if not able to process
|
||||
- Do basic user input validation even before you reserve resources. Don’t bother checking out a database connection, fetching domain objects, populating them, and calling validate( ) just to find out that a required parameter wasn’t entered.
|
||||
|
||||
- CIRCUIT BREAKERS
|
||||
- Coupled with timeouts usually for external resources but could also be used for internal critical code
|
||||
- Very useful to be able to trip them on demand from an OPERATIONS point of view or reset them on demand
|
||||
- if a call fails increment a count, if it passes threshold immediately fast fail and stop making that call and start a timeout
|
||||
- After fail timeout then half open and try the call again if it passes then close the circuit breaker and go back to normal
|
||||
- If a half open fails again then start the fail timeout again
|
||||
- These are critical incidents to report
|
||||
- If circuit breaker is open then it should fast fail message back to its caller indicating the fault
|
||||
- Popping a Circuit Breaker always indicates there is a serious problem. It should be visible to operations. It should be reported, recorded, trended, and correlated.
|
||||
- For a website using service-oriented architectures, “fast enough” is probably anything less than 250 milliseconds.
|
||||
- Protect against unbounded result sets (i.e. sql query suddenly returning millions of rows when only a few expected [USE LIMIT CLAUSE ALWAYS])
|
||||
- DO not allow unbounded result sets to be returned by our api, always enforce a built in limit per transaction with paging if no limit is specified
|
||||
- this way a user can't request unlimited data in one call
|
||||
- Also if due to something unexpected a ton of records are created in a table this will prevent a crash from sending all that data back
|
||||
- TESTING
|
||||
- REPLICATE PRODUCTION LOADS EARLY IN TESTING: an hour or two of development time spent creating a data generator will pay off many times over
|
||||
- MULTIPLE SERVERS: if a configuration requires multiple servers in production, be sure to test it that way.
|
||||
- using Virtual Machines if necessary.
|
||||
- If testing on one machine what would normally run on multiple it's easy to miss something vital
|
||||
- FIREWALLS: enable a full firewall on a testing machine and then carefully document any ports that need to be opened as this will be needed for production / installation
|
||||
- STARTUP AND SHUTDOWN
|
||||
- Build a clean startup sequence that verifies everything before flipping a switch to let users in (preflight check)
|
||||
- Don't accept connections until startup is complete
|
||||
- Don't just startup and then exit if PFC fails, it should be up and running to be interrogated by administrator
|
||||
- Clean shutdown: don't just hard shutdown, have a mechanism for each module to complete its work but not accept new work until all transactions are completed
|
||||
- Timeout the shutdown so if something hangs it can't stop the whole thing from being shut down.
|
||||
- ADMINISTRATION
|
||||
- Ability to set entire API to read only mode both on demand (control panel) and in code (for backup process)
|
||||
- Simple html based admin is ok but command line is better because it can be automated / accessed over a remote shell easily.
|
||||
- Don't have a fancy native app gui admin because it will piss off administrators and be hard to use over remote access
|
||||
- Ideally a simple html for regular users and a command line one for power users.
|
||||
- Try to make every admin function scriptable from the command line
|
||||
- "Jumphost": a single machine, very tightly secured, that is allowed to connect via SSH to the production servers
|
||||
- The ability to restart components, instead of entire servers, is a key concept of recovery-oriented computing
|
||||
- OPS TRANSPARENCY / DASHBOARD
|
||||
- This is important and needs to be in there just as much as the rest
|
||||
- Think of a dashboard that can be seen at a glance or left up all day on a screen in a "command center"
|
||||
- Should show real time snapshot but also scheduled daily events, whether they succeeded or not, i.e. notifications being sent out etc
|
||||
- transparency: historical trending, predictive forecasting, present status, and instantaneous behavior
|
||||
- Log to "ops" database "OpsDB" See page 300 of Release IT for more guidance on this.
|
||||
- Client side api to feed data to ops db
|
||||
- This is important, see the Release It book page 271 for some guidance on what to track
|
||||
- For the most utility, the dashboard should be able to present different facets of the overall system to different users. An engineer in operations probably cares first about the component-level view. A developer is more likely to want an application-centric view, whereas a business sponsor probably wants a view rolled up to the feature or business process level.
|
||||
- COLOR CODING:
|
||||
- *Green* All of the following must be true:
|
||||
- All expected events have occurred.
|
||||
- No abnormal events have occurred.
|
||||
- All metrics are nominal.
|
||||
- All states are fully operational.
|
||||
- *Yellow* At least one of the following is true:
|
||||
- An expected event has not occurred.
|
||||
- At least one abnormal event, with a medium severity,
|
||||
has occurred.
|
||||
- One or more parameters is above or below nominal.
|
||||
- A noncritical state is not fully operational. (For example,
|
||||
a circuit breaker has cut off a noncritical feature.)
|
||||
- *Red* At least one of the following is true:
|
||||
- A required event has not occurred.
|
||||
- At least one abnormal event, with high severity, has
|
||||
occurred.
|
||||
- One or more parameters is far above or below nominal.
|
||||
- A critical state is not at its expected value. (For example,
|
||||
“accepting requests” is false when it should be true.)
|
||||
- LOGGING
|
||||
- Always allow OPS to set the location of the log file
|
||||
- Use a logging framework, don't roll one. (LOG4NET?)
|
||||
- Log files are human readable so they constitute a human computer interface and should be designed accordingly
|
||||
- Clear, accurate and actionable information
|
||||
- columnar space padded, can be read and scanned quickly by humans and also read by software:
|
||||
- [datetime] errornumber location/source severity message
|
||||
- Messages should include some kind of transaction id to trace the steps of a transaction if appropriate (user id, session id, arbitrary ID generated on first step of transaction etc)
|
||||
- Design with purging / pruning log files in mind up front
|
||||
- Don't log to a resource used by the production system (i.e. don't log in the same database as the app is using, don't log to the same disk or volume)
|
||||
- Always use a rolling log format, don't just keep appending.
|
||||
- Do NOT deploy with full debug logs enabled, it's too much noise to spot problems (see AyaNova current log for that)
|
||||
- Ensure a ERROR message is relevant to OPS, not just a business logic issue. It should be something that needs doing something about to be error level.
|
||||
- ** Use short message codes / code numbers so users can convey them easily instead of the long text message!!!
|
||||
- CATALOG OF MESSAGES: building a catalog of all the messages that could appear in the log file is helpful to end users
|
||||
- MONITORING SYSTEMS
|
||||
- Logging of severe errors to OS application log can be used to integrate to automatic monitoring systems so it should be an option
|
||||
- Page 297 of Release It has some idea of what to expose and how to expose it.
|
||||
- ADAPTABILITY / CODING DESIGN DECISIONS / FUTUREPROOF
|
||||
- VERSIONING
|
||||
- Static assets should be in a version folder right off the bat, i.e. wwwRoot/css/v1/app.css, wwwRoot/js/lib/v1/jqueryxx.js or wwwRoot/js/templates/v1/
|
||||
- I think naming them similar to the api endpoint versioning is a good idea, i.e "v1" or "v2.1" etc.
|
||||
- this way they can still be served up to old clients without breaking new ones
|
||||
- Need the flexibility of having different version numbers at the backend and frontend. I.E. can refer to AyaNova backend v8.1 and front end v8.3 but keep it in the family of 8.x?
|
||||
- ?? Or maybe the backend is just an incrementing number like a schema update, could be 1000 for all it matters?
|
||||
- ?? Not sure how to handle the index page, maybe it needs to be version agnostic and in turn call another page or something,
|
||||
- maybe Index.html with menu to select indexV2.html or indexV2.3.html
|
||||
- "A new version is available, switch to version 8.5?" user selects and they book mark to that new version indexv8.5.html?
|
||||
- Database versioning (this one is trickiest of all, can't remove old objects until the api is unsupported, but they might need to change, will require creative solutions)
|
||||
- Select * is bad with reversioning, instead selecting exact columns is safer and MORE FUTUREPROOF
|
||||
- Can't drop old columns or set IS NOT NULL on some if they changed that way until after the new release is fully adopted and the old can be removed.
|
||||
- Refactoring
|
||||
- Constantly improving the design of existing code
|
||||
- Only possible with unit testing
|
||||
- Test driven development: write the test first then write the code to pass the test
|
||||
- Write just enough code to make the test pass and not one line more (YAGNI), once the test passes you can refactor all you want as long as the test passes
|
||||
- Mocks are good because they immediately cause the object under test to be "re-used", once in production and once in testing with a mock object, so reuse is tested as well.
|
||||
- Dependency injection
|
||||
- components should interact through interfaces and shouldn’t directly instantiate each other.
|
||||
- Instead, some other agency should “wire up” the application out of loosely coupled components
|
||||
- The container wires components together at runtime based on a configuration file or application definition
|
||||
- Encourages loose coupling
|
||||
- Helps with testing
|
||||
- Defining and using interfaces is the main key to successfully achieving flexibility with dependency injection
|
||||
- Objects collaborating through interfaces can have either endpoint swapped out without noticing.
|
||||
- That swap can replace the existing endpoint with new functionality, or the substitute can be a mock object used for unit testing.
|
||||
- Dependency injection using interfaces preserves your ability to make localized changes
|
||||
- https://joonasw.net/view/aspnet-core-di-deep-dive
|
||||
- https://docs.microsoft.com/en-us/aspnet/core/fundamentals/dependency-injection
|
||||
- http://deviq.com/strategy-design-pattern/
|
||||
- http://deviq.com/separation-of-concerns/
|
||||
|
||||
- SEPARATION OF CONCERNS
|
||||
- Presentation layer
|
||||
- The Presentation Layer should include all components and processes exclusively related to the visual display needs of an application, and should exclude all other components and processes
|
||||
- Service interface layer
|
||||
-
|
||||
- Business layer
|
||||
- The primary goal of the Business Layer is to encapsulate the core business concerns of an application exclusive of how data and behavior is exposed, or how data is specifically obtained. The Business Layer should include all components and processes exclusively related to the business domain of the application, and should exclude all other components and processes.
|
||||
- Object model
|
||||
- Business logic
|
||||
- Workflow
|
||||
- Resource access layer
|
||||
- The goal of the Resource Access Layer is to provide a layer of abstraction around the details specific to data access.
|
||||
- The Resource Access Layer should include all components and processes exclusively related to accessing data external to the system, and should exclude all other components and processes
|
||||
- FIPS
|
||||
- Don't use managed encryption if want to support FIPS
|
||||
|
||||
|
||||
TOOLING
|
||||
=-=-=-=
|
||||
NO PROPRIETARY OR COMMERCIAL COMPONENTS OR TOOLS WHEREVER POSSIBLE
|
||||
Need to automate the fuck out of anything that can be automated.
|
||||
Do this early on so time is saved right from the start.
|
||||
|
||||
|
||||
|
||||
NAMING
|
||||
=-=-=-
|
||||
|
||||
.net Namespace:
|
||||
COMPANY.PRODUCT.AREA (server)
|
||||
GZTW.AyaNova.whatever-whatever
|
||||
|
||||
|
||||
Files, routes, urls etc:
|
||||
Use lowercase entirely everywhere, do not use uppercase, this avoids future confusion all around.
|
||||
No spaces in names, this avoids having to use quotes in paths etc
|
||||
Use spinal (kebab) delimiter, i.e.: coding-standards.txt
|
||||
Here are some REST API guidelines for naming:
|
||||
https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#16-naming-guidelines
|
||||
|
||||
CSS:
|
||||
BEM naming - http://getbem.com/
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#TESTING
|
||||
- THINGS TO TEST
|
||||
- Concurrency exceptions with each db type as it could be an issue
|
||||
- Coding should go hand in hand with testing, don't write anything that can't be tested immediately
|
||||
- Write a data generator that goes hand in hand with testing, need large, realistic dataset generatable on demand to support testing
|
||||
- Unit tests where useful but a main focus on integration tests, need to be able to hit one button and be certain a build is passing
|
||||
- Going to need to test all architecture levels early and continuously. I.e. in a docker container, stand-alone, different DB types etc
|
||||
- Test should include exported data from v7 regularly.
|
||||
|
||||
|
||||
```A second, more subtle effect is produced through consistent unit testing.
|
||||
You should never call an object “reusable” until it has been reused.
|
||||
When an object is subjected to unit testing, it is immediately used in
|
||||
two contexts: the production code and the unit test itself. This forces
|
||||
the object under test to be more reusable. Testing the object means you
|
||||
will need to supply stubs or mocks in place of real objects. That means
|
||||
the object must expose its dependencies as properties, thereby making
|
||||
them available for dependency injection in the production code. When
|
||||
an object requires extensive configuration in its external context (like
|
||||
the previously mentioned Customer object), it becomes difficult to unit
|
||||
test. One common—and unfortunate—response is to stop unit testing
|
||||
such objects. A better response is to reduce the amount of external context required.
|
||||
In the example of the Customer domain object, extracting
|
||||
its persistence responsibilities reduces the amount of external context
|
||||
you have to supply. This makes it easier to unit test and also reduces
|
||||
the size of Customer’s crystal—thereby making Customer itself more malleable.
|
||||
The cumulative effect of many such small changes is profound.```
|
||||
|
||||
|
||||
DOCUMENTATION
|
||||
=-=-=-=-=-=-=
|
||||
All documentation will be primarily in Markdown format following the Commonmark spec http://commonmark.org/help/.
|
||||
See tooling doc for how to use commonmark markdown
|
||||
If other formats are required they will be generated *from* the markdown.
|
||||
The api should be self documenting so docs can be generated and api routes can provide information and examples
|
||||
i.e. while coding write the docs for each route / method etc.
|
||||
IF we want to do a web sequence diagram there is a handy tool:
|
||||
- https://www.websequencediagrams.com/
|
||||
|
||||
|
||||
|
||||
|
||||
ERROR MESSAGES
|
||||
The 4 H’s of Error Messages
|
||||
|
||||
Human
|
||||
Helpful
|
||||
Humorous
|
||||
Humble
|
||||
|
||||
|
||||
228
devdocs/features.txt
Normal file
@@ -0,0 +1,228 @@
|
||||
# RAVEN FEATURES / CHANGES FROM AYANOVA 7.x
|
||||
|
||||
* Items with an asterisk are completely new
|
||||
|
||||
## FOUNDATIONAL ITEMS / NFR
|
||||
( Things that need to exist right off the start)
|
||||
|
||||
### SECURITY / AUTHENTICATION
|
||||
|
||||
- SECURITY GROUPS / RIGHTS
|
||||
- https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/1809
|
||||
- Moving to roles
|
||||
|
||||
|
||||
- AUTHENTICATION
|
||||
|
||||
### COMMON BUSINESS OBJECT PROPERTIES
|
||||
- *Tags
|
||||
- Regions
|
||||
- Attachments and docs
|
||||
- Wiki pages
|
||||
- Custom fields
|
||||
- Common actions quantified and identified
|
||||
- Event log (actions / events enum)
|
||||
- Notification (notifiable interface?)
|
||||
- Support log (feature usage, crashes, ui element used)
|
||||
- BIZ object switch / circuit breaker
|
||||
- ID number generator for PO's workorders, quotes, pms etc. Unique number for each type.
|
||||
|
||||
|
||||
### SHELL / MENU
|
||||
|
||||
|
||||
## FUNCTIONAL REQUIREMENTS
|
||||
|
||||
### FEATURES
|
||||
- MRU - shell
|
||||
- PLUGINS
|
||||
- WIKI PAGE
|
||||
- LOGOUT
|
||||
- SUBGRIDS
|
||||
- CLIENT GROUPS
|
||||
- DISPATCH ZONES
|
||||
- PART ASSEMBLIES
|
||||
- PART CATEGORIES
|
||||
- PARTS WAREHOUSES
|
||||
- PRIORITIES
|
||||
- RATES
|
||||
- TAX CODES
|
||||
- UNIT MODEL CATEGORIES
|
||||
- UNITS OF MEASURE
|
||||
- UNIT SERVICE TYPES
|
||||
- USER CERTIFICATIONS
|
||||
- USER SKILLS
|
||||
- WORKORDER CATEGORIES
|
||||
- WORKORDER ITEM TYPES
|
||||
- WORKORDER STATUSES - BIG NEW FEATURES FOR THIS ONE
|
||||
- QUICK OPEN WORKORDER / QUOTE / PM BY NUMBER
|
||||
- HELP
|
||||
- CONTENTS F1 (goes to manual online)
|
||||
- TECHNICAL SUPPORT (goes to the forum)
|
||||
- CHECK FOR UPDATES (popup dialog)
|
||||
- PURCHASE LICENSES (goes to license FAQ page, not purchase page)
|
||||
- ABOUT AYANOVA (shows some support info as well as version)
|
||||
- LICENSE (enter / view license)
|
||||
- EXIT (closes AyaNova, weird that it's there and help isn't last)
|
||||
- CUSTOMIZE TEXT (administrator only)
|
||||
|
||||
|
||||
### DASHBOARD
|
||||
|
||||
|
||||
### SERVICE WORKORDERS
|
||||
- SERVICE WORKORDER
|
||||
- SERVICE WORKORDER TEMPLATES
|
||||
|
||||
### QUOTES
|
||||
- QUOTE WORKORDER
|
||||
- QUOTE TEMPLATES
|
||||
|
||||
### PREVENTIVE MAINTENANCE
|
||||
- PM WORKORDER
|
||||
- PM TEMPLATES
|
||||
|
||||
|
||||
### SCHEDULE
|
||||
|
||||
|
||||
### INVENTORY
|
||||
- PARTS
|
||||
- PURCHASE ORDERS
|
||||
- PO ITEMS
|
||||
- PURCHASE ORDER RECEIPT
|
||||
- PO RECEIPT ITEMS
|
||||
- ADJUSTMENTS
|
||||
- ADJUSTMENT ITEMS
|
||||
- PART INVENTORY
|
||||
- PART REQUESTS
|
||||
|
||||
|
||||
### CLIENTS
|
||||
- CLIENTS
|
||||
- HEADOFFICE
|
||||
- CONTRACTS
|
||||
- PROJECTS
|
||||
- CUSTOMER SERVICE REQUESTS
|
||||
|
||||
|
||||
### UNITS
|
||||
- UNITS
|
||||
- UNIT MODELS
|
||||
- LOAN ITEMS
|
||||
|
||||
### VENDORS
|
||||
- VENDORS (only item, nothing else here, hmmmm.....)
|
||||
|
||||
|
||||
### USER PANE
|
||||
- MEMOS
|
||||
- NOTIFICATION SUBSCRIPTIONS
|
||||
- NOTIFICATION DELIVERIES (user or all if manager account)
|
||||
- WIKI PAGE
|
||||
|
||||
### SEARCH
|
||||
- has no sub items at all (will be deprecated and moved into every page at top)
|
||||
|
||||
### ADMINISTRATION
|
||||
|
||||
- *ONBOARDING / GUIDED SETUP
|
||||
|
||||
- GLOBAL SETTINGS
|
||||
- REGIONS
|
||||
- SECURITY GROUPS
|
||||
- USERS
|
||||
- CUSTOM FIELDS DESIGN
|
||||
- LOCALIZED TEXT DESIGN
|
||||
- NOTIFICATION DELIVERIES
|
||||
- REPORT TEMPLATES
|
||||
- FILES IN DATABASE
|
||||
- SCHEDULE MARKERS
|
||||
|
||||
|
||||
### PLUGINS / ADD-ONS
|
||||
- AyaScript
|
||||
- DUMP
|
||||
- ExportToExcel
|
||||
- ImportExportCSV
|
||||
- Merger
|
||||
- OutlookSchedule
|
||||
- PTI
|
||||
- QBI
|
||||
- QBOI
|
||||
- QuickNotification
|
||||
- XTools
|
||||
- OL
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Dec 29th 2017 new workorder structure:
|
||||
|
||||
Users will use templates heavily to get predefined workorder things pre-added so they can just enter what they need.
|
||||
Create new workorder, templates are offered with that option automatically and maybe some built in ones to get people going.
|
||||
|
||||
Instead of the todo and completed sections, each line has a completed or not status and it displays differently
|
||||
so can still see at a glance what is done or not done. Items group within category by completed status.
|
||||
|
||||
So items with quantities are not "completed" until they have an entry in their Quantity field. Suggested quantities only are incomplete.
|
||||
Items without Quantities have a "completed" or "serviced" check-box or some alternative that still gives equivalent to completed.
|
||||
|
||||
|
||||
Instead of Service and materials is the above features, however there could still be a similar view for certain roles.
|
||||
|
||||
Can select charge to customer in same record if completed (or added records with real quantities).
|
||||
By default charges to customer automatically, maybe a setting though?
|
||||
|
||||
Invoice and payments etc still to be determined
|
||||
_______________________________________
|
||||
[WO HEADER BLOCK]
|
||||
- header items
|
||||
[TODOS BLOCK]
|
||||
[TODO1 HEADER BLOCK]
|
||||
|
||||
TODO1 ITEMS
|
||||
- Scheduled techs [no completed status?, does have a checkin feature maybe checkin triggers reveal completed as checkout equivalent?]
|
||||
- Parts [suggested qty and real qty]
|
||||
- Parts requested / on order ["completed" when all received]
|
||||
- Unit(s) [serviced checkbox]
|
||||
- Tasks [already has completed checkbox]
|
||||
- Outside service shipping [suggested and real]
|
||||
- Outside service repairs [suggested and real]
|
||||
- Loan item [suggested qty and real qty]
|
||||
- Custom fields [does this need some kind of completed?]
|
||||
- Travel [suggested and real qty fields]
|
||||
- Labor [suggested and real qty fields]
|
||||
- Expenses [suggested and real qty fields]
|
||||
|
||||
|
||||
TODO2 HEADER
|
||||
TODO ITEMS
|
||||
DONE ITEMS
|
||||
|
||||
|
||||
.....
|
||||
|
||||
|
||||
INVOICE - this is a view that shows bill to customer items, same stuff different view
|
||||
PAYMENTS
|
||||
PROFIT AND LOSS
|
||||
_________________________________________
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
37
devdocs/project-goals.txt
Normal file
@@ -0,0 +1,37 @@
|
||||
This case is for overall goals of what or how AyaNova 8 should be different / improved
|
||||
____________________________________________________________________________________________
|
||||
|
||||
- FIRST AND FOREMOST: has to be something I can sell and support alone with minimal effort
|
||||
if any part of the process requires me to spend time regularly on it then that needs to be eliminated or automated.
|
||||
- DIRECT PORT of AyaNova 7.x, try to keep as similar as possible but improving where necessary
|
||||
- It would be crazy to try to re-invent the wheel on this
|
||||
- Easier to upgrade for the customer - no hassle updates
|
||||
- Should be a black box that only relies on the db and the configuration to be updated.
|
||||
- SIMPLER FOR A USER TO UNDERSTAND
|
||||
- Simplify the structure, anything confusing or redundant eliminate
|
||||
- Things like TAGS which eliminate the need for a bunch of different separate objects
|
||||
- No need to learn about a separate "Dispatch Zone" object, just use a tag for that and so we can provide examples of how to tag and how to use tags without
|
||||
having to teach or support a bunch of disparate features.
|
||||
|
||||
- Modern technology as open source as possible (no proprietary code if possible)
|
||||
- More quickly add new features and not have to make huge monolithic updates but can do a quick feature add
|
||||
- Scale from standalone single user to containerized service capable of being hosted for thousands
|
||||
- Fully secure to modern standards that are appropriate
|
||||
- Cleaner interface, fewer separate objects, more use of tags instead of discrete categorization type objects
|
||||
- Easy to use modern API, self documented for users with example snippets
|
||||
- Easy to learn with guided training
|
||||
- Easier to support for us and the customer
|
||||
- Easier to install for the customer
|
||||
- Easier to sell and manage sales end of it
|
||||
- Easier to maintain for the customer (backups, upgrades etc)
|
||||
- Better looking, modern, clean more focused interface that is easier to enter data into quickly
|
||||
- Imports as much of AyaNova 7.x data as possible automatically
|
||||
- Easier to trial for test users
|
||||
- Something we can offer to host with as minimal hassle to us as possible
|
||||
- Operating system agnostic
|
||||
- Responsive interface for a variety of screen sizes (or at least wide and phone)
|
||||
- Automated build of api docs that can be accessed from the api itself
|
||||
- Automated or easier generation of manual / MARKDOWN
|
||||
- Support the latest browser and the one before it of every major browser like gitlab does:
|
||||
- Supported web browsers, We support the current and the previous major release of Firefox, Chrome/Chromium, Safari and Microsoft browsers (Microsoft Edge and Internet Explorer 11).
|
||||
- Each time a new browser version is released, we begin supporting that version and stop supporting the third most recent version.
|
||||
244
devdocs/research.txt
Normal file
@@ -0,0 +1,244 @@
|
||||
# Research required
|
||||
“Do the simplest thing that will work.”
|
||||
|
||||
|
||||
# BACK END / ARCHITECTURE
|
||||
|
||||
|
||||
## DOCKER FINDINGS
|
||||
- Need a shared docker network between all containers, I was assuming could just redirect to localhost but didn't realize docker network is internal to docker
|
||||
- this helped https://stackoverflow.com/questions/39202964/how-to-reach-another-container-from-a-dockerised-nginx?rq=1
|
||||
- first created a defined external network sudo docker network create docker-network
|
||||
- HOWEVER - I think docker has a default network so this external network probably isn't necessary, ??? MORE RESEARCH REQUIRED HERE
|
||||
- Then in the docker compose files of all involved had to put at the bottom one network statement (none in the service sections above)
|
||||
- networks:
|
||||
docker-network:
|
||||
driver: bridge
|
||||
|
||||
- need to run Nginx in front of ayanova for easiest ssl cert handling etc
|
||||
- Very good reference here: https://gist.github.com/soheilhy/8b94347ff8336d971ad0
|
||||
- Nginx needs to see ayanova via the docker host name which is the container name
|
||||
- So I had to point it like this:
|
||||
proxy_pass http://ayanova:7575;
|
||||
- Where ayanova is the image name in the docker compose file for starting AyaNova server
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## DEFAULT PORTS (unassigned) http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
|
||||
- Some available ports not specifically assigned that I like the look of:
|
||||
- 4048, 4144, 4194, 4195, 4196, 4198, 4317, 4318, 4319, 4332, 4337-4339 , 4363-4365, 4366, 4424, 4434-4440, 4748, 5076-5077, 5551-5552,5994-5998
|
||||
- 7575-7587, 8084-8085
|
||||
- 7575 (RAVEN IS USING THIS)
|
||||
- 5995?
|
||||
- 4424?
|
||||
- 8084?
|
||||
|
||||
## ?? LICENSING
|
||||
- ??how to functionally license RAVEN
|
||||
- ??how to protect the keys
|
||||
|
||||
## ?? How am I going to expose the api for developers
|
||||
- Only through the rest interface?
|
||||
- Will there be plugins at the backend code level in any of those layers?
|
||||
- OUR STUFF VS END USERS STUFF
|
||||
- What existing plugins did we make and what would they need from Raven to be replicated?
|
||||
- how will this affect api decisions?
|
||||
- What can end users be allowed to do and how and at what layer.
|
||||
|
||||
|
||||
## ?? FUNCTIONAL REQUIREMENTS /FEATURES
|
||||
|
||||
Look at AyaNova existing code,
|
||||
A) what worked well
|
||||
B) what was repetitious and should be automated
|
||||
C) what had ongoing problems
|
||||
|
||||
Will help greatly with design
|
||||
|
||||
## TAGS
|
||||
- Wait to choose implementation type until basic design is decided upon as it will affect method use internally
|
||||
- This is actually a big and tricky topic; here are some resources found while researching:
|
||||
- http://tagging.pui.ch/post/37027745720/tags-database-schemas
|
||||
- https://stackoverflow.com/questions/172648/is-there-an-agreed-ideal-schema-for-tagging
|
||||
- https://dba.stackexchange.com/a/35795 <---this is what I'm leaning towards
|
||||
- Leaning towards a single tags table with word and id (and maybe other properties like color, last used etc)
|
||||
- Each taggable table has a map of its own to the tags, i.e. [TAGS: id, text] [CUSTOMERTAGS: customerID, tagId], [WORKORDERTAGS: WOID, tagID]
|
||||
- This will require a bit of complexity to keep it pruned though, i.e. tags will get orphaned when no one is using them so a routine will periodically need to clean them out.
|
||||
- This may be a better way to handle full text indexing as well with a search table for each object table. Kind of balloons the table count though, but who cares really, does that matter?
|
||||
- An alternative is exactly like how I do full text search in AyaNova now with a generic foreign key, upside is cleaner db structure, but downside is ensuring cleanup and maint.
|
||||
- If parent object is deleted must go in and remove any linked tags as well.
|
||||
- Upside is it's easily searchable for all incidents of a specific tag over all taggable types, but is this really necessary? It can still be done with separate queries.
|
||||
|
||||
|
||||
** SEPARATE FOR *MVP* PRIORITY ONE IS MUST HAVE BEFORE RELEASE 8.0 / PRIORITY 2 OR LOWER IS NICE TO HAVE **
|
||||
- go over all rockfish AyaNova related cases and request for features, into the hopper.
|
||||
- Move stuff to v8 if going forward, otherwise drop its priority to lowest in AyaNova 7.x and any related project cases
|
||||
- Move any items that just have to be done now for v7 into high priority AyaNova 7.x track.
|
||||
- ?? What are the commonalities of things required in the UI to come from the backend based on current AyaNova
|
||||
- Name ID lists?
|
||||
- list factories
|
||||
- report factories
|
||||
- fetch by name / id
|
||||
- Email sending and processing
|
||||
|
||||
|
||||
|
||||
|
||||
## ?? CONCURRENCY ISSUES
|
||||
- In the ef core book the author mentions getting around a concurrency issue by making it a non issue by not even updating the same order but instead appending status changes to another table
|
||||
- https://livebook.manning.com/#!/book/entity-framework-core-in-action/chapter-8/v-7/171
|
||||
- "This design approach meant that I never updated or deleted any order data, which means that concurrent conflicts could not happen. It did make handling a customer change to an order a bit more complicated, but it meant that orders were safe from concurrent conflict issues."
|
||||
- Microsoft tutorial: https://docs.microsoft.com/en-us/aspnet/core/data/ef-mvc/concurrency
|
||||
- See what will be critical concurrency in RAVEN (inventory likely), see if can code around it so no concurrency can happen.
|
||||
- Plan on what is concurrency prone and how to deal with it.
|
||||
- Maybe split out the concurrent sensitive bits of a workorder (parts) to a different screen, i.e. show read only in workorder and to update that specific section have to go in and do it like a wizard or something?
|
||||
|
||||
## ?? BACKDOOR / LOST PASSWORD
|
||||
- How to handle this, current method is horribly susceptible to nefarious purpose
|
||||
- Maybe requires "physical" access to the server (put a file up or something?)
|
||||
|
||||
## ?? AUTOMATED BACKGROUND PROCESSOR "GENERATOR"
|
||||
- What will replace "Generator" in RAVEN?
|
||||
|
||||
|
||||
## FRONT END
|
||||
|
||||
- VISUAL DESIGN / UX
|
||||
- - https://www.ventureharbour.com/form-design-best-practices/
|
||||
- Make the form designs simple and beautiful, it will directly translate into sales
|
||||
- Field labels above fields, not beside them (https://research.googleblog.com/2014/07/simple-is-better-making-your-web-forms.html)
|
||||
- Single column vertical layout for complex forms is definitely best. In wide mode though what to do?
|
||||
- It's ok to have two inputs on a line together if they are directly related so that they have one single label above them and minimal space between them
|
||||
- i.e. date and time ([day] [month] [year] [hour][minute] etc)
|
||||
- But really don't if possible because it's never better than a single field that just "figures out" the parts of the input.
|
||||
- Always indicate required fields, don't just error if they are not filled out.
|
||||
- Break big forms into sections with shaded headers or emboldened headers with larger fonts but shaded background is good even with no text to break apart.
|
||||
- Use single fields for phone numbers or postal codes and process and interpret it through code rather than forcing them to use multiple fields as this causes confusion
|
||||
- Messages to users that are important and long and required to convey need to *really* stand out to even be read. In their own box with a different background or something alike.
|
||||
- Put validation errors to the right of the input field, not below it as users can see it better and it requires less cognitive load.
|
||||
- Multi page forms (like wizards) need to have progress indicators or people will get antsy and bail
|
||||
- Favor checkboxes or radio buttons over drop downs unless there are more than 6 or so options as they require less cognitive load
|
||||
- Use placeholders sparingly (light gray prompt inside input) only when there is ambiguity, don't use them for obvious stuff
|
||||
- Always use a predictive search / autocomplete for any field that requires a selection from a large number of options
|
||||
- Selectable images are the most compelling engaging selection type for users
|
||||
- I.E. like a row of radio buttons but a row of images to select from and the choice is the selection by clicking on the image.
|
||||
- People enjoy clicking on images.
|
||||
- Use this as much as possible wherever possible. I.E. for things like
|
||||
- A trial user picking the company type they would like to trial data for
|
||||
- Input fields should be sized to reflect the amount of data required to be entered in them
|
||||
- This helps the user understand what is required of them
|
||||
- ZIP code or house number should be much shorter than an address line for example
|
||||
- Do not rely on colour alone to communicate
|
||||
- 1 in 12 men have some degree of colour blindness
|
||||
- Ensure entire forms can be navigated using the tab key.
|
||||
- Test the form in low and bright light situations on a mobile device outdoors and desktop
|
||||
- Enable browser autofill with contextual tags
|
||||
- Not exactly sure what this means in practice but it sounds good
|
||||
- Use visual cues and icons, brains process visual cues way faster than text alone
|
||||
- Don't ask for a password twice, use a eyeball icon to reveal text instead like keepass does
|
||||
- Mobile should be at least 16px high text / desktop can be 14px
|
||||
|
||||
|
||||
- FRONT END FRAMEWORK
|
||||
- https://stackshare.io/stacks (interesting tool to see what other companies use in their stacks)
|
||||
- https://semantic-ui.com/
|
||||
- Vue.js https://vuejs.org/v2/guide/comparison.html
|
||||
- JS IN HTML via directives (kind of like Angular)
|
||||
- State management library if required: https://github.com/vuejs/vuex
|
||||
- Similar to redux but different
|
||||
- Module for VS Code!! https://vuejs.github.io/vetur/
|
||||
|
||||
- PROS:
|
||||
- GUIDE: https://vuejs.org/v2/guide/
|
||||
- Simpler to code than React "Vue has the most gentle learning curve of all js frameworks I have tried"
|
||||
- Single file components, all aspects of a component are in the same file (css, js, html)
|
||||
- UI LIBRARY: http://element.eleme.io/#/en-US
|
||||
- Everyone keeps claiming it's more productive
|
||||
- Comes with a databinding and MVC model built in
|
||||
- CLI project generator
|
||||
- Has official routing solution and state management solutions
|
||||
- They are starting to work on a native library like react native (not there yet though and maybe I don't care about that so much)
|
||||
- Getting things done over "purity"
|
||||
- http://pixeljets.com/blog/why-we-chose-vuejs-over-react/
|
||||
- Working with html forms is a breeze in Vue. This is where two-way binding shines.
|
||||
- Vue is much simpler than AngularJS, both in terms of API and design. Learning enough to build non-trivial applications typically takes less than a day, which is not true for AngularJS
|
||||
- CONS:
|
||||
- Less answers on stackoverflow than React (for example)
|
||||
- Vue on the other hand only supports IE9+ (not sure if this is a con or not)
|
||||
- Doesn't have a major corporation behind it like React or Angular
|
||||
- (2016) runtime errors in templates are still a weak point of Vue - exception stacktraces are often not useful and lead into Vue.js internal methods
|
||||
- React.js
|
||||
- HTML in JS
|
||||
- PROS:
|
||||
- Very widely used, tons of resources
|
||||
- Works with huge apps
|
||||
- Can be compiled to native apps (somehow, though Vue is working on it)
|
||||
- CONS:
|
||||
- Requires a good grasp of javascript
|
||||
- Harder to code
|
||||
- Requires a *TON* of add-on libraries as it only deals with the view part
|
||||
- "PURITY" over getting things DONE
|
||||
- Very nitpicky and fuckery prone just to do things the official way, slower to get business objectives accomplished
|
||||
- Ember
|
||||
- PROS:
|
||||
- Requires less knowledge of javascript
|
||||
- Comprehensive includes all bits
|
||||
- Suited to small teams
|
||||
- CONS:
|
||||
- Angular
|
||||
- PROS:
|
||||
- Google and Microsoft supported
|
||||
- Supposed to be good for someone coming from C# (typescript)
|
||||
- CONS:
|
||||
- Requires a *lot* of learning to use
|
||||
- Typescript
|
||||
- Angular directives and ways of doing things which are peculiar to it
|
||||
- Possibly slower than others to render
|
||||
|
||||
|
||||
- ARCHITECTURE / TECHNOLOGY STACK
|
||||
- Electron hosted desktop app like vs code or Slack:
|
||||
- What is the advantage of Electron hosted app vs just a plain html 5 app? (nothing)
|
||||
- https://slack.com/downloads/windows
|
||||
- https://blog.bridge.net/widgetoko-a-node-js-and-electron-application-written-in-c-1a2be480e4f9
|
||||
|
||||
|
||||
|
||||
- PERFORMANCE: Do not send one byte extra that is not needed at the client
|
||||
- Not even a single space,
|
||||
- especially not extra libraries or unminified code or cases
|
||||
- ICONS and webfonts should have only what is needed, nothing more.
|
||||
- ??USING A CDN??
|
||||
- VERSIONING STATIC RESOURCES
|
||||
- Use Gulp, process with hash *in filename* not as a query string (better supported by intermediate caching or proxy servers)
|
||||
- https://docs.microsoft.com/en-us/aspnet/core/client-side/using-gulp
|
||||
- https://code.visualstudio.com/docs/editor/tasks
|
||||
- https://hackernoon.com/how-to-automate-all-the-things-with-gulp-b21a3fc96885
|
||||
- https://github.com/sindresorhus/gulp-rev
|
||||
- https://github.com/jamesknelson/gulp-rev-replace
|
||||
- FORM VALIDATION:
|
||||
- https://developer.mozilla.org/en-US/docs/Learn/HTML/Forms/Form_validation
|
||||
- ?? framework for complex UI required or plain old javascript? (imagine a workorder or PO)
|
||||
- Need a lot of shit on forms, maybe a framework is the way to go:
|
||||
- VALIDATION / DIRTY CHECK
|
||||
- AJAX FEEDBACK
|
||||
- ??
|
||||
|
||||
|
||||
- ?? Funky graphs
|
||||
- ?? Markdown
|
||||
- Generate html pages from Markdown docs: https://github.com/Knagis/CommonMark.NET
|
||||
- Markdown UI editor I haven't evaluated yet: https://github.com/nhnent/tui.editor
|
||||
|
||||
|
||||
- ?? TESTING
|
||||
- ??Automated UI testing in browser.
|
||||
- To catch browser changes that break functionality.
|
||||
- Get a quick tool overview.
|
||||
- Also can it use diff. browsers and devices?
|
||||
|
||||
231
devdocs/solutions.txt
Normal file
@@ -0,0 +1,231 @@
|
||||
# Raven solutions
|
||||
*Solutions, tools and techniques to accomplish goals from research*
|
||||
|
||||
“Do the simplest thing that will work.”
|
||||
|
||||
|
||||
|
||||
## Middleware docs
|
||||
- https://docs.microsoft.com/en-us/aspnet/core/fundamentals/middleware/?tabs=aspnetcore2x
|
||||
|
||||
## API FEATURES
|
||||
- Ability to set whole api to read only mode by administration or by code like backup routines
|
||||
|
||||
### Biz object TAGGER / MARKER INTERFACE or ATTRIBUTES (ITaggable, IAttachable etc etc)
|
||||
- Apparently should use attribute not interfaces: https://stackoverflow.com/questions/2086451/compelling-reasons-to-use-marker-interfaces-instead-of-attributes
|
||||
- https://docs.microsoft.com/en-us/dotnet/standard/attributes/writing-custom-attributes
|
||||
- But if I do use interfaces or need to work with them in future then:
|
||||
- Map all objects on boot: https://garywoodfine.com/get-c-classes-implementing-interface/
|
||||
- It's called a tagging interface: https://en.wikipedia.org/wiki/Marker_interface_pattern
|
||||
- https://stackoverflow.com/questions/15138924/c-sharp-how-to-determine-if-a-type-implements-a-given-interface
|
||||
|
||||
|
||||
|
||||
## AUTOMATIC JOB SCHEDULER / RUNNER
|
||||
- jobs in background required for auto backup, mailing, notifications
|
||||
- https://docs.microsoft.com/en-us/dotnet/standard/microservices-architecture/multi-container-microservice-net-applications/background-tasks-with-ihostedservice
|
||||
|
||||
- Probably don't need a full fledged scheduler because the above should work, but just in case here are some:
|
||||
- Fluent Scheduler looks simpler than hangfire, supports .net core
|
||||
- https://github.com/fluentscheduler/FluentScheduler
|
||||
- Chroniton looks super basic
|
||||
- https://github.com/leosperry/Chroniton/wiki/Example-ASP.NET-Core
|
||||
|
||||
|
||||
## VALIDATION / BUSINESS RULES (FRONT AND BACK)
|
||||
- To run in both places looks like JSON schema is the way to go, it can be stored independent and validated at both ends
|
||||
- It's a mature standard and is platform agnostic
|
||||
- Tutorial from AJV guy: https://code.tutsplus.com/tutorials/validating-data-with-json-schema-part-1--cms-25343
|
||||
- https://github.com/RSuter/NJsonSchema This generates and validates schema in .net world and is open source MIT license
|
||||
- https://github.com/epoberezkin/ajv This is seemingly the gold standard for javascript based
|
||||
- Lesser things looked at:
|
||||
- https://github.com/cachecontrol/json-rules-engine 128 stars might be adaptable
|
||||
- https://github.com/rsamec/business-rules-engine //this one is for javascript and kind of limited but gives some good ideas
|
||||
|
||||
|
||||
## RESOURCE ACCESS LAYER (DATABASE)
|
||||
- DATABASE
|
||||
- Support Postgresql only out of the box, consider other db's later
|
||||
- CONCURRENCY IS BUILT INTO EFCORE: https://docs.microsoft.com/en-us/ef/core/saving/concurrency
|
||||
- Transactions are also built in by default as the save changes is the only point that stuff actually gets written to the db in most cases
|
||||
- MUST TEST CONCURRENCY AND TRANSACTIONS FAIL
|
||||
- CONTAINERIZED DB's
|
||||
- For development, I don't think there's anything better for databases, it beats manual setup, vagrant boxes, and shared development servers by a long shot. I feel that educating everyone on your team in how to use it is well worth the investment. docker-compose makes setting up even a fairly complicated development environment a breeze.
|
||||
- BACKUP AND RESTORE
|
||||
- DISCOURSE METHOD
|
||||
- I like it because it handles various scenarios and results in a nice SQL command file that rebuilds the whole db, not some cryptic binary format
|
||||
- Can set all api to read only mode, then dumps the db using a db command, then zips it and offers it for download
|
||||
- Backup process
|
||||
- pause the sidekiq background process worker
|
||||
- Can optionally set the api to read only mode (interesting idea)
|
||||
- dumps the data to a file in sql command format for maximum compatibility (even with other db server types purportedly)
|
||||
- Archives it
|
||||
- presents it in the UI for download
|
||||
- unpause the background worker
|
||||
- Restore process
|
||||
- This one is interesting, PGSQL has a "schema" which is a way of partitioning a database to in effect have a separate set of tables in the same db
|
||||
- They move the "public" production "schema" to a "backup" schema (effectively moving it but keeping it in the db)
|
||||
- They restore to a separate interim "restore" "schema" then they move all the tables in the restore schema to the production "public" schema (one by one in a loop)
|
||||
- I guess this way it's reversible if there is an issue but I don't see code to handle any issues
|
||||
- https://github.com/discourse/discourse/tree/master/lib/backup_restore
|
||||
- Also seems to have some capacity to send it to an AWS bitbucket or some thing, maybe an online integration with dropbox or other would be nice
|
||||
|
||||
|
||||
|
||||
|
||||
## ARCHITECTURE / NFR
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### Subdomains pointing to different droplets tutorial
|
||||
- https://www.digitalocean.com/community/tutorials/how-to-set-up-and-test-dns-subdomains-with-digitalocean-s-dns-panel
|
||||
|
||||
### Other stuff
|
||||
This section contains the architectural plans for all aspects of Raven driven by goals and research
|
||||
https://en.wikipedia.org/wiki/Non-functional_requirement
|
||||
|
||||
- Architecture reference resources:
|
||||
- https://stackoverflow.com/questions/5049363/difference-between-repository-and-service-layer
|
||||
|
||||
- ARCHITECTURE LAYERS
|
||||
- CLIENT
|
||||
- HTML 5 SPA
|
||||
- WEB API LAYER
|
||||
- REPOSITORY LAYER
|
||||
- BUSINESS LAYER (AKA DOMAIN LAYER)
|
||||
- The Business Layer is the place where all the business/domain logic, i.e. rules that are particular to the problem
|
||||
that the application has been built to handle, lives. This might be salary calculations, data analysis modelling,
|
||||
or workflow such as passing an order through different stages.
|
||||
- I'm using this: https://www.thereformedprogrammer.net/a-library-to-run-your-business-logic-when-using-entity-framework-core/
|
||||
- and this is the repo: https://github.com/JonPSmith/EfCore.GenericBizRunner
|
||||
|
||||
- DATA ACCESS LAYER (EF CORE)
|
||||
- EF CORE multiple database stuff
|
||||
- Migrations with different providers: https://stackoverflow.com/questions/42819371/ef-core-multiple-migration-sets
|
||||
- DATABASE
|
||||
|
||||
|
||||
|
||||
|
||||
- .NET CORE DEPLOYMENT
|
||||
- Basically just get the files on to the system in one case or need .net installed as a pre-requisite then drop the files on
|
||||
- https://docs.microsoft.com/en-us/dotnet/core/deploying/deploy-with-cli
|
||||
- https://docs.microsoft.com/en-us/dotnet/core/deploying/
|
||||
- KESTREL alone
|
||||
- Kestrel alone can be used but it won't work if need to share a port with something else and differentiate by host header
|
||||
- in case of host header issue can run NGinx in front or IIS
|
||||
- More to come here once we have a testable skeleton project to set up
|
||||
- STATIC FILE CACHING
|
||||
- https://andrewlock.net/adding-cache-control-headers-to-static-files-in-asp-net-core/
|
||||
|
||||
- Asp.net Core 2.0 application stack
|
||||
- .net core WEBAPI project
|
||||
- Swagger for documentation
|
||||
|
||||
- REST best practices
|
||||
- Excellent reference guide here: https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md
|
||||
- URLS: A good api url: https://api.ayanova.com/v1.0/client/22
|
||||
- Keep length under 2000 characters for maximum client compatibility
|
||||
- concurrency (The ETag response-header field provides the current value of the entity tag for the requested variant. Used with If-Match, If-None-Match and If-Range to implement optimistic concurrency control.)
|
||||
- JSON property names SHOULD be camelCased.
|
||||
- There are specific Date, time, Duration, Interval formats that should be used (https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#113-json-serialization-of-dates-and-times)
|
||||
- Error response: The error response MUST be a single JSON object. This object MUST have a name/value pair named "error." The value MUST be a JSON object.
|
||||
This object MUST contain name/value pairs with the names "code" and "message," and it MAY contain name/value pairs with the names "target," "details" and "innererror."
|
||||
eg: error:{code=1200,message="blah"} error:{code=1200,message="blah",target="eg property name", details="details for programmer", innererror:}
|
||||
- Versioning: this is an area I need to treat carefully, there are tools to make it easier:
|
||||
- I will use URL Path segment versioning, i.e. api/v1.0/client/22
|
||||
- https://www.hanselman.com/blog/ASPNETCoreRESTfulWebAPIVersioningMadeEasy.aspx
|
||||
- https://github.com/Microsoft/aspnet-api-versioning/wiki
|
||||
- https://github.com/Microsoft/aspnet-api-versioning/tree/master/samples/aspnetcore/SwaggerSample
|
||||
- Push notifications (I.e. on a client record being created or a workorder updated or...?)
|
||||
- If so there is a segment in the rest doc from microsoft that goes over it in detail
|
||||
- API THROTTLING / RATE LIMITING
|
||||
- https://github.com/stefanprodan/AspNetCoreRateLimit
|
||||
- ASYNC / BUSINESS LAYER
|
||||
- https://stackoverflow.com/questions/42276149/best-practice-for-using-async-await-in-webapi
|
||||
- https://stackoverflow.com/questions/41719661/asp-net-core-making-service-asynchronous
|
||||
- http://www.codemag.com/article/1701061 BUSINESS LAYER STUFF
|
||||
- https://github.com/RickStrahl/AlbumViewerVNext
|
||||
|
||||
## AUTOMAPPER
|
||||
- Sounds like a tool I might need, here is a good tutorial: https://dotnetcoretutorials.com/2017/09/23/using-automapper-asp-net-core/
|
||||
|
||||
|
||||
|
||||
## TESTING
|
||||
- TESTING performance / load / resiliency testing
|
||||
- https://docs.microsoft.com/en-us/aspnet/core/testing/
|
||||
- Spend the time to generate realistic production level data in large quantity for testing
|
||||
- Do not do any integration testing without realistic data
|
||||
- Data generation for testing could be re-used for trial data generation for customer evaluation purposes
|
||||
- Test with production sized data !!!! (did not do this properly when doing AyaNova originally)
|
||||
- TOOLS
|
||||
- xUnit https://xunit.github.io/
|
||||
- Mocking, different test runners here: http://asp.net-hacker.rocks/2017/03/31/unit-testing-with-dotnetcore.html
|
||||
- GENFU - tries to automatically fill objects which sounds dicey https://github.com/MisterJames/GenFu/
|
||||
- https://github.com/bchavez/Bogus This is a .net port of faker.js and as such probably a good choice
|
||||
- Testing with a docker container (old but interesting) https://devblog.xero.com/getting-started-with-running-unit-tests-in-net-core-with-xunit-and-docker-e92915e4075c
|
||||
## LOGGING
|
||||
- Watch out, logging can be a huge performance drain, test with logs on and off to see what and if necessary replace logging class with something faster.
|
||||
|
||||
|
||||
|
||||
|
||||
## REFERENCE RESOURCES
|
||||
|
||||
|
||||
- ASP.NET CORE FUNDAMENTALS DOCS
|
||||
- Everything to do with asp.net core fundamental coding aspects
|
||||
- https://docs.microsoft.com/en-us/aspnet/core/fundamentals/?tabs=aspnetcore2x
|
||||
|
||||
- AWESOME .NET CORE
|
||||
- Has a list of solutions for .net core project needs
|
||||
- https://github.com/thangchung/awesome-dotnet-core
|
||||
|
||||
|
||||
- THIS TYPE OF PROJECT REFERENCE CODE
|
||||
- NICE DIAGRAM OF OUR ARCHITECTURE FOR RAVEN: http://www.dotnetcurry.com/entityframework/1348/ef-core-web-api-crud-operations
|
||||
- https://github.com/RickStrahl/AlbumViewerVNext
|
||||
- This is a good one, it uses .net core, runs on multiple platforms, has an angular front end, is from a seasoned practical developer etc etc
|
||||
- https://github.com/dodyg/practical-aspnetcore
|
||||
- .net samples and tutorials on official docs page
|
||||
- https://docs.microsoft.com/en-us/dotnet/samples-and-tutorials/
|
||||
- https://samueleresca.net/2017/02/implementing-solid-data-access-layers-using-asp-net-core/
|
||||
- AUTOMAPPER: https://github.com/AutoMapper/AutoMapper/wiki/Getting-started
|
||||
- https://www.infragistics.com/community/blogs/dhananjay_kumar/archive/2016/03/07/how-to-implement-the-repository-pattern-in-asp-net-mvc-application.aspx
|
||||
|
||||
|
||||
- ORCHARD
|
||||
- Almost the perfect reference application, they are doing what I will be doing but for a CMS
|
||||
- https://github.com/OrchardCMS/OrchardCore
|
||||
- This samples link actually contains a lot of useful info as well like multi-tenanting stuff etc
|
||||
- https://github.com/OrchardCMS/OrchardCore.Samples
|
||||
|
||||
- DISCOURSE
|
||||
- this app is kind of what raven will become in many ways architecturally,
|
||||
- It's a message board that is modern uses Postgresql, digital ocean docker container, mobile and desktop browser UI
|
||||
- It's open source so plunder its source code here:
|
||||
- https://github.com/discourse/discourse
|
||||
- https://github.com/discourse/discourse_docker/blob/master/samples/standalone.yml
|
||||
|
||||
|
||||
|
||||
- DOCKER CONTAINERIZED APP BUILD GUIDE
|
||||
- This guide has a *lot* of good info in it that is right up my alley:
|
||||
- https://www.red-gate.com/simple-talk/sysadmin/containerization/overcoming-challenges-microservices-docker-containerisation/?utm_source=simpletalk&utm_medium=pubemail&utm_content=20170926-slota5&utm_term=simpletalkmain
|
||||
|
||||
- FRONT END DASHBOARD
|
||||
|
||||
- Cool dashboard with graphs and shit: https://github.com/tabler/tabler?utm_source=DigitalOcean_Newsletter
|
||||
|
||||
## HELP AND DOCUMENTATION RESOURCES
|
||||
- ONLINE HELP FOR RAVEN CUSTOMERS
|
||||
- The Jenkins help page has a really good layout for help with Guided Tour, User Handbook, Resources and Recent Tutorials on left panel
|
||||
- https://jenkins.io/doc/pipeline/tour/hello-world/
|
||||
|
||||
|
||||
## GRAPHICS / ARTWORK UI FANCIFICATION RESOURCES
|
||||
- Free for use background image generator that looks really nice and soothing: https://coolbackgrounds.io/
|
||||
- Free for any use even without attribution stock photography: https://unsplash.com/
|
||||
17
devdocs/specs/admin-settings-business.txt
Normal file
@@ -0,0 +1,17 @@
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/3488
|
||||
Was all this stuff below, however, will be different in RAVEN
|
||||
|
||||
### ADMINISTRATION
|
||||
|
||||
- *ONBOARDING / GUIDED SETUP
|
||||
|
||||
- GLOBAL SETTINGS
|
||||
- REGIONS
|
||||
- SECURITY GROUPS
|
||||
- USERS
|
||||
- CUSTOM FIELDS DESIGN
|
||||
- LOCALIZED TEXT DESIGN
|
||||
- NOTIFICATION DELIVERIES
|
||||
- REPORT TEMPLATES
|
||||
- FILES IN DATABASE
|
||||
- SCHEDULE MARKERS
|
||||
1
devdocs/specs/admin-settings-system-operations.txt
Normal file
@@ -0,0 +1 @@
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/3488
|
||||
3
devdocs/specs/authentication.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
Authentication
|
||||
|
||||
|
||||
2
devdocs/specs/client-group-deprecated.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
Replace client group with tags, see case 3441, 3373
|
||||
|
||||
1
devdocs/specs/client-service-requests.txt
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
1
devdocs/specs/clients.txt
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
6
devdocs/specs/core-attachments.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
# Attachments specifications
|
||||
|
||||
## TODO
|
||||
|
||||
- Need core dlToken route for not just attachments
|
||||
- There should be a notification for ops if a file attachment is physically not found when it has a db record see Attachmentcontroller line 407
|
||||
53
devdocs/specs/core-backup-and-restore.txt
Normal file
@@ -0,0 +1,53 @@
|
||||
BACKUP AND RESTORE SPECS
|
||||
|
||||
CASES
|
||||
|
||||
|
||||
REQUIREMENTS
|
||||
|
||||
BACKUP
|
||||
- Backup to independent text format using JSON text files like DB DUMP and Discourse does
|
||||
- Backup all attached files (encrypt?)
|
||||
- Encrypt the backup file archive or each file (need to determine) so that the backup file can't be read independent of RAVEN
|
||||
- Encryption key needs to be user selectable because we don't want any AyaNova user to be able to restore any other AyaNova users db
|
||||
|
||||
- Optionally automatically send the backups to online destinations such as
|
||||
- AWS web storage
|
||||
- Dropbox
|
||||
- Mailbox if under certain size
|
||||
- Look into whatever apis are available for other online storage services
|
||||
- Download backups
|
||||
- Backup during closed window where server is not available for anything but read only operations during backup window
|
||||
- User configurable backup time
|
||||
- User configurable encryption key in environment variable? If not set then not encrypted??
|
||||
|
||||
|
||||
|
||||
|
||||
RESTORE
|
||||
- Automatically closes api before RESTORE
|
||||
- Restore from backup locations can save to?
|
||||
- Or at least a method to fetch and list those backups to fetch to local?
|
||||
- Upload a backup file for restoration
|
||||
- Decrypts data during restore process
|
||||
- Must use user provided key and there should be some kind of marker file or something to verify it decrypted properly and is a valid AyaNova backup
|
||||
- Restore the attached files (decrypt?)
|
||||
- Uses user configurable encryption key
|
||||
|
||||
ROLES
|
||||
- Ops full, biz full can
|
||||
- modify backup configuration
|
||||
- Restore
|
||||
- Backup
|
||||
|
||||
- Ops limited and biz limited can
|
||||
- view the backup and restore configuration
|
||||
- Backup
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
10
devdocs/specs/core-check-for-updates.txt
Normal file
@@ -0,0 +1,10 @@
|
||||
CHECK AND UPDATE SPECS
|
||||
|
||||
REQUIREMENTS
|
||||
|
||||
- Must confirm current RAVEN is licensed before checking for updates with a current and valid support and updates license
|
||||
- We don't want people to initiate an update if they aren't eligible
|
||||
- We also don't want Raven to allow it to run if the new code was built after the current support and updates in the license expired
|
||||
- Check internal license
|
||||
- Check externally with Rockfish if licensed
|
||||
- Probably best if it just has an update check route to Rockfish where it feeds license serial number (obfuscated?) to Rockfish so it can see which license it is currently using
|
||||
46
devdocs/specs/core-documentation.txt
Normal file
@@ -0,0 +1,46 @@
|
||||
Documentation
|
||||
|
||||
HELP MANUAL POINTS TO CONSIDER
|
||||
|
||||
ONBOARDING
|
||||
- The manual and/or guides and/or built into UI guided help needs to answer all the specific questions people have when onboarding
|
||||
- For example a section for technicians, dispatcher, each business role and then under that answers to basic questions:
|
||||
- How do I see my workorders that are open
|
||||
- How do I see who is where today
|
||||
- How do I order inventory
|
||||
- The old manual and guide weren't job and task oriented so it doesn't directly answer questions people have,
|
||||
it just shows how to use features which isn't the way people approach it when familiarizing themselves.
|
||||
|
||||
PRE-SALES
|
||||
- Similar to onboarding but a higher level view, just basically answering the questions "Can it do XXX??"
|
||||
- Perhaps it's the same as onboarding but you have to click through for ever increasing detail so you can abandon once your question is answered
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- User docs and manual written in Markdown format (commonmark).
|
||||
- MKDOCS tool used to generate static html "site" for docs
|
||||
- Docs versioned into folders by major/minor version, so different docs for 8.1 and again different for 8.2. Patch revisions don't change docs.
|
||||
- Static docs site incorporated into AyaNova backend so can view the docs for that release inside it directly.
|
||||
- Docs written in parallel with development as per agile principles so as soon as a feature is added it's documented as well.
|
||||
|
||||
## Markdown for documentation
|
||||
- [Commonmark cheat sheet](http://commonmark.org/help/)
|
||||
- [Commonmark testing 'dingus'](http://commonmark.org/help/)
|
||||
- Convert markdown to other formats with this tool http://pandoc.org/
|
||||
- Useful for standalone docs maybe
|
||||
|
||||
- Auto generate a static site from markdown docs with this developer tool: http://www.mkdocs.org/
|
||||
- Material theme for MKDOCS https://squidfunk.github.io/mkdocs-material/
|
||||
- MKDocs WIKI (themes, solutions to tricky things etc) https://github.com/mkdocs/mkdocs/wiki
|
||||
|
||||
** MKDOCS **
|
||||
- HOW TO RUN IT: python -m mkdocs serve to run the server, python -m mkdocs build to build the site
|
||||
- package is tragically out of date on my debian, have to install manually
|
||||
- Already had Python 2.7.13 (mkdocs page says 2.7.2 in its example but 2.7 is ok)
|
||||
- didn't have PIP so have to get that, selected python-pip in synaptic (v9.0.1-2)
|
||||
- Installed mkdocs by running pip install mkdocs
|
||||
- apparently not in path have to preface command with python like so: python -m mkdocs new ayanova
|
||||
- Also installed the Material theme which looks a lot nicer
|
||||
18
devdocs/specs/core-email-ops.txt
Normal file
@@ -0,0 +1,18 @@
|
||||
EMAIL OPS SPECS
|
||||
|
||||
CASES
|
||||
|
||||
|
||||
|
||||
REQUIREMENTS
|
||||
|
||||
|
||||
- Supports notifications via generator
|
||||
|
||||
- Needs to deliver email
|
||||
- Plain text, probably not html for now
|
||||
- Attachments are a possibility and should be allowed for
|
||||
- A simple api that can be called by generator and others as necessary
|
||||
|
||||
|
||||
- FUTURE: some future features may require reading email as well
|
||||
110
devdocs/specs/core-generator.txt
Normal file
@@ -0,0 +1,110 @@
|
||||
GENERATOR SPECS
|
||||
|
||||
CASES
|
||||
|
||||
|
||||
|
||||
REQUIREMENTS
|
||||
|
||||
|
||||
- Is accessible from OPERATIONS endpoint see core-long-running-operations.txt
|
||||
|
||||
|
||||
Use db table OPERATIONS queue to submit jobs for generator to handle.
|
||||
|
||||
Generator looks in table periodically for work to do and if the criteria matches starts the job by calling out to the submitting code
|
||||
- Generator is only concerned with the job starting and ending and logging, that kind of thing, it does no work on its own directly
|
||||
- Start date time
|
||||
- Is api locked and can it run when api is locked
|
||||
- Is job exclusive, i.e. no other jobs while it's running and or api is locked
|
||||
- Updates into a operationstatus table and endpoint that is open when api is locked
|
||||
- Deletes status succeeded operations and their operationstatus entries after 24 hours automatically
|
||||
- Deletes status FAILED operations and their logs after 2 weeks automatically
|
||||
- New jobs cannot be submitted via the rest api when the REST interface is locked but it can be queried to see what is happening
|
||||
- Hands off completed jobs back to submitter for resolution
|
||||
- I.E. if a workorder was generated from a PM then it goes back to the PM and says "DONE THIS JOB" or "FAILED THIS JOB" and the PM code handles resubmitting a new job for next time
|
||||
- This way generator does less and isn't responsible for hairy complex code that is best handled at the source of the job
|
||||
|
||||
- JOB is actually handed off back to the object that submitted it into the db in the first place to do the work
|
||||
- i.e. generator sees a send notification job for a workorder status change, it is ready to go so it in turn passes the job ID or info back to the workorder-process-notification code to handle and takes back the result
|
||||
- i.e. a workorder may submit a "notify users about status change" job, generator sees it and in turn calls notify users code in workorder which in turn creates new jobs to send each email
|
||||
generator then sees send email jobs and in each case hands them off to the email processor to deal with
|
||||
|
||||
|
||||
|
||||
|
||||
TODO GO THROUGH GENERATOR CASES AND v7 GENERATOR CODE, COMPILE A LIST OF WHAT GENERATOR NEEDS TO DO
|
||||
- Need a list of everything RAVEN generator will need to do
|
||||
- BREAK out into small separate concerns
|
||||
- Make a spec doc for each separate concern (i.e. one for the process loop, one for the LRO stuff, one for the physical delivery etc)
|
||||
|
||||
|
||||
JOBSWEEPER
|
||||
- Need a maintenance job class that handles periodic routine internal maintenance CoreJobManager
|
||||
- Submits and handles or hands off routine jobs
|
||||
- Clean up database (look for orphaned records) CoreJobDBMaintenance
|
||||
- Check for license maybe or check license validity? (had some plan to automatically pull license from rockfish, to accommodate monthly rentals) CoreJobLicenseCheck
|
||||
- Clear out completed jobs CoreJobSweeper
|
||||
- Probably dozens of other things
|
||||
|
||||
NOTIFICATION SWEEPER
|
||||
- Maintains notifications tables: cleans out old ones, finds ones that are "stuck", notifies OPS about weirdness if found, removes undeliverable ones, counts retries etc
|
||||
|
||||
|
||||
|
||||
RAVEN JOB SEQUENCE OF OPERATIONS
|
||||
|
||||
Hypothetical Widget Updated notification job
|
||||
- Widget Updated
|
||||
- Calls into notification code in WidgetBiz or JobObject or maybe INotifyObject to see if it should create a notification
|
||||
|
||||
|
||||
|
||||
|
||||
HOW AND WHAT v7 GENERATOR DOES
|
||||
|
||||
These functions are called every 5 minutes by Generate in Winform desktop standalone.
|
||||
- GenProcessPM.GeneratePMWorkorders();
|
||||
- GenProcessDeliveries.DeliverNotifications();
|
||||
- GenProcessClientNotifications.DeliverNotifications();
|
||||
|
||||
|
||||
GeneratePMWorkorders
|
||||
- Calls for a list of pm id's of not expired and generate on date in future (for some reason it's just in future, not a specific time range, seems buggy)
|
||||
- Takes the list of id's and passes them to workorder generate from pm which in turn...
|
||||
- Workorder.cs Makes workorders from PM workorders copying pm wo data into service wo data
|
||||
- Workorder.cs After making the service workorder the last bit of code calculates the next pm date and then adjusts the source pm to the next date
|
||||
|
||||
DeliverNotifications (techs)
|
||||
- Get a list via notificationlist :
|
||||
// List of notifications formatted for delivery
|
||||
/// and screened to be deliverable
|
||||
///
|
||||
///
|
||||
///
|
||||
/// Loops through all NotifyEventRecords
|
||||
/// For pending type events, checks to see if within users selected notification window
|
||||
/// foreach NotifyEventRecord it adds a NotificationListInfo
|
||||
/// for each open delivery window for each subscriber to that event
|
||||
/// As it Processes each deliverable notification it formats it to users
|
||||
/// locale and preferences for size etc and keeps track of the address and
|
||||
/// delivery type.
|
||||
///
|
||||
///
|
||||
/// End result is a list ready to deliver.
|
||||
/// As each one is delivered Ok it should be deleted from the NotifyEvent table by the
|
||||
/// delivery Process that uses this list.
|
||||
- Iterate the list and depending on deliver via method required:
|
||||
- SMS (smtp)
|
||||
- SMTP (email)
|
||||
- POPUP
|
||||
- AYANOVA MEMO
|
||||
- Log delivery and remove event after each item is delivered individually
|
||||
|
||||
|
||||
DeliverNotifications (clients)
|
||||
- Gets a list of client notifications that are ready to deliver (they hold the entire message in EML format)
|
||||
- Attempts smtp server connection, if a problem then logs it and optionally, based on global setting, will bail and retry again later, otherwise proceeds through code which in turn deletes each notification it can't deliver
|
||||
- Delivers via smtp, if fail logs failure. Either way deletes the notification so it's gone forever
|
||||
|
||||
|
||||
71
devdocs/specs/core-import-v7.txt
Normal file
@@ -0,0 +1,71 @@
|
||||
IMPORT FROM V7 SPECS
|
||||
|
||||
CASES
|
||||
- https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/3503
|
||||
|
||||
|
||||
REQUIREMENTS
|
||||
|
||||
- Limited area of concern: rather than trying to do all types of import, I'm going to write this as if it's all for v7 only
|
||||
- only when I write the next importer will I see if there are any savings of combining objects, but for now small classes with single responsibility
|
||||
- Import v7 data into RAVEN via datadump plugin for v7
|
||||
- ROUTE: endpoint to allow upload of v7 datadump zip file .
|
||||
- ROUTE: endpoint to allow delete of uploaded datadump file.
|
||||
- ROUTE: endpoint to Show list of datadump files uploaded.
|
||||
- ROUTE: endpoint to trigger non-selective import of specified datadump file (import all)
|
||||
|
||||
- FUTURE - ROUTE: endpoint to download a picklist collection of objects found in dump suitable for user to then select specific objects (or type of objects) to import
|
||||
- FUTURE - ROUTE: endpoint to trigger selective import from an uploaded datadump file
|
||||
- future thing if required because user always has option of editing zip file and removing what is not required to be imported
|
||||
- Supports specifying what to import so can just pick clients or something.
|
||||
- Pick by name of folder I guess within zip since each object type is in its own folder
|
||||
|
||||
|
||||
|
||||
- IMPORT
|
||||
- user selects all or collection of folders to import from zip
|
||||
- User is returned a jobid that the client can use to display the activity log
|
||||
- Importer opens the archive file and iterates the folders
|
||||
- Each type of object has a corresponding biz object that handles importing that type
|
||||
- So, for example, each client json is handed off to a corresponding ClientBiz for import
|
||||
- Importer updates the opslog as it's doing the import with summary information (3 clients imported successfully etc)
|
||||
- Should close api while it's doing the import.
|
||||
- Datadump files should be in backup files folder
|
||||
|
||||
|
||||
ROLES
|
||||
- Ops full, biz full can submit jobs
|
||||
- Ops limited and biz limited can view the jobs (already coded)
|
||||
|
||||
|
||||
OBJECTS
|
||||
- [ImportAyaNova7Controller]
|
||||
- [ImportAyaNova7Biz] object to back controller routes, submit job, run job and pass off import to each biz object in turn that implements:
|
||||
- [IImportAyaNova7Object] interface for each biz object (e.g. ClientBiz)
|
||||
- Import(AyaTypeAndID,JSONDATAfromimportfile)
|
||||
|
||||
|
||||
|
||||
|
||||
OPERATION SEQUENCE
|
||||
|
||||
The upload / delete / show list of datadump files part is standard api stuff and doesn't need special handling nor locking the server
|
||||
Importing
|
||||
- Triggered by ops user remotely by selecting datadump file for import
|
||||
|
||||
|
||||
|
||||
|
||||
SCHEMA
|
||||
|
||||
|
||||
- It would be helpful to have an importmap table that is used temporarily during import to store a map of v7Guid's with their imported RAVEN type and id
|
||||
- This way it can be a lookup table to quickly set other imported data, i.e. client id's to match to units being imported.
|
||||
IMPORTMAP
|
||||
- ObjectType
|
||||
- ObjectId
|
||||
- v7Guid
|
||||
|
||||
|
||||
|
||||
|
||||
228
devdocs/specs/core-license-key-system.txt
Normal file
@@ -0,0 +1,228 @@
|
||||
License key / products
|
||||
|
||||
(see marketing-sales-planning.txt specs doc for specific reasoning behind this)
|
||||
|
||||
|
||||
CURRENT WORKING STATUS:
|
||||
|
||||
2018-06-01
|
||||
Rough framework is in place, can fetch a key and can request a key but nothing really connected at the Rockfish end.
|
||||
Rockfish will need some ui and other code to handle it
|
||||
Putting that off until closer to release as Rockfish will no doubt change before then anyway (also involves a lot of UI work I'm not into right now)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
PROCESS
|
||||
|
||||
LICENSE BOOTSTRAPPING
|
||||
- RAVEN when boots and inits license will ensure a GUID DBID value is set in LICENSE TABLE
|
||||
- This is used to match individual databases to licenses and avoid problems with registration names being the only differentiator as in v7
|
||||
- Also this is the new fetch code
|
||||
- RAVEN DB is empty (of biz data) it's license locked and a User MUST EITHER:
|
||||
|
||||
1) request a trial key for an empty db by filling out a form in RAVEN (or using api tool)
|
||||
- see "trial process" below for details
|
||||
|
||||
|
||||
2) Fetch a paid for license
|
||||
- or, in future purchase right inside RAVEN
|
||||
- RAVEN fetches and installs the license and the rest proceeds as normally
|
||||
|
||||
TRIAL PROCESS
|
||||
==============
|
||||
FOR USER: Install RAVEN, boot, go to client endpoint in browser, get prompted for first test of contact with ROCKFISH server,
|
||||
once that is ok then get prompted to fill out a form to request a trial key or attempt to fetch a key already ready for them
|
||||
User fills out form providing registration name and email address, hits send, told to check email to confirm address then will receive a key after that.
|
||||
They are told they can only request a new trial by erasing all the data first
|
||||
|
||||
FOR CODE:
|
||||
- RAVEN user requests a trial key for an empty db by filling out a form in RAVEN (or using api tool)
|
||||
- registration name, email address are required fields
|
||||
- This request is stored in the db because it may need to be re-sent later on subsequent erasure of biz data
|
||||
- RAVEN makes and stores internally into a new DB a DBID value which uniquely identifies *THAT* database installation
|
||||
- RAVEN transmits the request details (with dbid value) to ROCKFISH
|
||||
- RAVEN starts a repeating job that will periodically check the ROCKFISH server for a license response
|
||||
- ROCKFISH validates the request by verifying the email address provided is (is still) valid and by getting our approval after that.
|
||||
- Verifies email by sending an email and waiting for a response to verify the email is valid
|
||||
- If the email is valid then it creates a trial request record for us to view and approve or not approve with cause entered as text
|
||||
- This part we might automate in future but for now it's a good "fuse"
|
||||
- The DBID value is associated permanently with a site in ROCKFISH
|
||||
- We get a notification of a request sitting in ROCKFISH and we approve it or not
|
||||
- IF APPROVED:
|
||||
- ROCKFISH generates (or looks up if repeat) a customer record, flags it as a trial type customer with a created date (for later GDPR removal or turning into a real customer record)
|
||||
- (ROCKFISH UI needs a grouping for trial customers for main customer list)
|
||||
- ROCKFISH stores the DBID value in the SITE record
|
||||
- ROCKFISH generates a valid TRIAL license DBID code is the temporary fetch code.
|
||||
- ROCKFISH Ultimately should do this automatically unless we specifically flag something
|
||||
- IF NOT APPROVED
|
||||
- ROCKFISH creates a not approved type of license with the fetch code
|
||||
- ROCKFISH sends an email to the user explaining that it's a fail and why
|
||||
- RAVEN knows a request has been sent and has a license job that checks to see if a license is available periodically
|
||||
- ROCKFISH returns a "no-op" type response if there is no license
|
||||
- ROCKFISH returns a "FAIL" type response if there is a fail with reason code to display to user
|
||||
- RAVEN CANCELS the check for license job and removes the request
|
||||
- ROCKFISH returns a license key when available under the dbid fetch code
|
||||
- ROCKFISH tags the license as "FETCHED" = true so it can't be fetched twice
|
||||
- RAVEN CANCELS the check for license job and attempts to install the key in normal process and initialize
|
||||
|
||||
|
||||
PURCHASE PROCESS
|
||||
================
|
||||
|
||||
|
||||
FOR USER:
|
||||
Assuming ShareIt system still in place:
|
||||
Customer makes a purchase, (possibly through RAVEN in which case the DBID is sent with the url to ShareIt to be filled into the purchase page automatically)
|
||||
Customer enters their DBID as instructed into a box on the purchase page. The order is not valid without it and it's a required field.
|
||||
customer can easily copy their DB ID from within RAVEN
|
||||
|
||||
We get the order and it gets transferred to ROCKFISH, license key is generated and an automated email notification is sent to user instructing that the license is ready for "pickup".
|
||||
User (who is OPS full or limited or BIZ full) goes into the license page in RAVEN UI and selects "Check for new license" at which point the server does the check passing the GUID along with the check, if new license then it installs and inits and informs user.
|
||||
If it's a fail for some reason informs user and no change to current license.
|
||||
|
||||
|
||||
FOR CODE:
|
||||
|
||||
- RAVEN needs a DBID route in licensing controller to return the DBID available to OPS full or read only OR BIZ full rights only.
|
||||
- ROCKFISH needs a way to send a license key email to the customer automatically upon generation
|
||||
- ROCKFISH needs a new key generation page with the new options on it
|
||||
|
||||
|
||||
RENEWAL PROCESS
|
||||
===============
|
||||
|
||||
FOR USER:
|
||||
If perpetual: They get a renewal upcoming notice if yearly (!=ServiceMode) from ROCKFISH so they can update payment info or cancel, RAVEN will start a license check job immediately before expiry
|
||||
If Service: they do nothing, it's understood that it will start to check automatically for a new key around the time of expiry of the old key until it receives a CANCELLED response or a key or a FAIL
|
||||
|
||||
FOR CODE:
|
||||
See below, basically raven will check with rockfish automatically and handle any problems
|
||||
|
||||
|
||||
|
||||
"2018" KEY FORMAT
|
||||
=================
|
||||
- JSON format
|
||||
- secured with hash signature, only we can issue valid keys
|
||||
- Features, All licenseable items (and some configuration features) are in a single "Features" collection scheduledusers, accounting "Service" (meaning it's rental), "Trial" meaning it's a trial etc.
|
||||
- "ServiceMode" feature which indicates the license is for renting not perpetual so that it can trigger more constant check for new licenses or offer rental specific features and functionality
|
||||
- "TrialMode" feature which indicates it's a trial so a different UI can be presented in some areas with sales links etc
|
||||
- Futureproof, can put anything in there
|
||||
- LicenseExpiration date that applies to all features together as a group (no more individual expiry dates)
|
||||
- When the license expires it stops working. NO read only mode, nothing works except some ops routes to install a new license, that's it
|
||||
- MaintenanceExpiration date that applies to support and updates subscription for all features of that license (no individual feature support expiry)
|
||||
- Used by "check for updates" code to see if they are eligible for an update and to auto-update
|
||||
- ID This field is critical and should contain the customer ID followed by the license ID, so for example 34-506987
|
||||
- THESE points here below for this item may be invalid if we go with DBID instead, but keeping as seems to make sense
|
||||
- This way we can verify with automated tools the customer requesting without sending an actual name in text and also that they have the latest key or not
|
||||
- RAVEN says "This is my key 34-506987" to ROCKFISH which replies with "Here is a newer key [key]" or "You are up to date" or "Revoked for reason: [non payment]"
|
||||
- (Test keys are all 00-serialid)
|
||||
- DBID
|
||||
- This field is the unique DB id (GUID) of the database in use that is first generated by RAVEN when it first boots with an empty db
|
||||
- It is stored in the global settings of the database and is never erased once it is set
|
||||
        - It is also used in conjunction with the ID field or possibly on its own to be the fetch code
|
||||
- RegTo: ALL TRIAL KEYS ARE REGISTERED TO SOMEONE NO SUCH THING AS TWO LICENSES IN CIRCULATION WITH THE SAME NAME (i.e. no more "Unregistered trial" meaning it's a trial, every user will have a specific name to test it out)
|
||||
- Rockfish will issue a trial key upon first request from empty db
|
||||
- An empty regto is an invalid key.
|
||||
- LicenseFormat
|
||||
- there will be a LicenseFormat version field which will be initially "2018"
|
||||
- This is for future changes to how license is formatted to ease the code burden of detecting that
|
||||
- Old future release versions should work with new licenses but not the reverse
|
||||
|
||||
|
||||
|
||||
ROCKFISH CODE
|
||||
- FETCHROUTE: Needs a route to automatically check for presence of new licenses from RAVEN, fetch and install them
|
||||
- Will return either a license or an object indicating error or nothing new to return
|
||||
|
||||
|
||||
- UI: Needs a whole lot of ui and code to support
|
||||
- automatic generation of license key from manual and future automated billing
|
||||
- Key stored and served when required with challenge and response system by past key id and customer number maybe
|
||||
automatic billing and renewals but initially generate licenses automatically based on payment info and all that shit
|
||||
|
||||
RAVEN CODE
|
||||
- Raven if an empty db can send a request for a key to Rockfish with a registered name
|
||||
- If db empty on boot set a Guid value in the global settings table that uniquely and permanently identifies that database
|
||||
- RAVEN will have two addresses for fetching a license key to different domains so that we can stay up in case of an issue
|
||||
- i.e. it will try rockfish.AyaNova.com first but if it fails then fallback to rockfish.helloayanova.com as secondary
|
||||
- If there is an existing key RAVEN will automatically check for a new key close to the expiry period of the old key
|
||||
- If a FAIL is returned it will stop checking and tell the user at which point they must manually start the check after fixing the issue
|
||||
- If a NOOP is returned it will reschedule to check again later
|
||||
- If a CANCELLED is returned then the customer is no longer active and it will never check again unless user manually forces a one time check and the license changes
|
||||
- If a license is returned RAVEN will attempt to install it and also clear the running check job and also clear the CANCELLED status
|
||||
|
||||
- RAVEN LICENSE OBJECT / TABLE add a DBID guid column
|
||||
- RAVEN LICENSE OBJECT / TABLE add a LastFetchStatus column corresponding to an enum of FAIL, ACTIVE, CANCELLED
|
||||
- RAVEN LICENSE OBJECT / TABLE add a LastFetchMessage column with the last message from the ROCKFISH license route server (so FAIL can be stored and communicated to user)
|
||||
|
||||
|
||||
USAGE:
|
||||
|
||||
- MUST be able to support monthly billing cycle (automatic license installation or approval so user doesn't have to install key every month)
|
||||
- Rockfish should have a licensed yay or nay or new available route for RAVEN to check periodically and also users can trigger a force check
|
||||
- RAVEN checks periodically for new license to fetch in line with billing cycle or expiry cycle.
|
||||
- SAFE fallback if can't contact license server and allow a few misses before something kicks in, but not to allow people to use unlimited by blocking rockfish for example
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
PRODUCT LICENSE CHANGES FROM v7
|
||||
|
||||
|
||||
Still optional and purchased separately:
|
||||
QBOI, QBI, PTI
|
||||
|
||||
Possible idea: sold as "Accounting add on" and the user can use one of those of their choice and can switch which means 1 product instead of three which might make keys easier.
|
||||
Possible downside is that we can't track who uses what or what we sell how much of so puts a kink in marketing or planning for deprecation or where to spend
|
||||
effort, however there are other ways of addressing that.
|
||||
|
||||
|
||||
//INCLUDED ADD-ON's
|
||||
OLI - INCLUDED / TURNED INTO GENERIC FEATURE IMPORT / EXPORT CONTACTS and SCHEDULE TO ICAL
|
||||
OutLookScheduleExport - INCLUDED / TURNED INTO GENERIC SCHED EXPORT ICAL FORMAT
|
||||
Export to xls - Included with RAVEN
|
||||
QuickNotification - Included with RAVEN
|
||||
ImportExportDuplicate - Included with RAVEN
|
||||
RI/WBI/MBI - UNNECESSARY with RAVEN
|
||||
|
||||
|
||||
This all brings up the matter then that we might only have two things to license: the number of users and whether there is accounting or not.
|
||||
?? PROS and CONS INLINE WITH RAVEN DESIGN GOALS ??
|
||||
|
||||
|
||||
|
||||
|
||||
THOUGHTS
|
||||
Raven license key system specs:
|
||||
|
||||
|
||||
- CONSIDER supporting non-connected scenario where user is not internet connected
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
???
|
||||
- Don't worry about obfuscation at first (maybe ever, I mean, really)
|
||||
- Need automated routine that checks with rockfish to fetch a license automatically when necessary
|
||||
- I.E. it's sub runs out so it starts checking if there is a newer license available and if so fetches it
|
||||
- Need an alternative route and UI in Rockfish for RAVEN style license handling
|
||||
- Must use a different signing key than AyaNova 7 so we don't expose v8 key stuff, (although, it's just the public part of the key to validate right?)
|
||||
=====================================
|
||||
|
||||
RESEARCH SOURCES
|
||||
- http://www.reprisesoftware.com/blog/2017/08/implement-a-recurring-revenue-license-model/
|
||||
- https://www.linkedin.com/pulse/20140819084845-458042-the-change-from-perpetual-to-subscription-based-software-licensing
|
||||
- http://software-monetization.tmcnet.com/articles/429528-long-but-rewarding-road-a-recurring-revenue-model.htm
|
||||
- How to calculate pricing in a service business: https://www.patriotsoftware.com/accounting/training/blog/how-pricing-services-strategies-models-formula/
|
||||
- SAAS pricing models: https://www.cobloom.com/blog/saas-pricing-models
|
||||
103
devdocs/specs/core-localization.txt
Normal file
@@ -0,0 +1,103 @@
|
||||
# Localization specifications
|
||||
|
||||
|
||||
REQUIREMENTS
|
||||
- Keys are text, human readable and as short as possible
|
||||
- Not numeric ID's for these, strictly textual
|
||||
- values may have substitution tokens in them for certain things
|
||||
- DataDump plugin will export and import any custom locales that did not come with AyaNova 7
|
||||
- Dump needs to check if the "stock" locale has been edited or not before exporting
|
||||
- Only edited ones are exported
|
||||
- For example if someone edited the Spanish locale then it would dump as "Spanish-Custom" (or whatever the word for custom is in that language) so as not to interfere with our stock Built in Spanish in Raven
|
||||
- The documented renaming (below) will need to be automated during import of v7 stock locales to migrate to the new key values
|
||||
- Two kinds of locales: Stock and Custom.
|
||||
- Stock locales are stored in db and not user editable
|
||||
 - Stock locale names are whatever the international name for that locale is like "esp" or "fr" etc
|
||||
- Custom locales are stored in the database and are user customizable
|
||||
|
||||
ROUTES
|
||||
- GET ROUTE that provides a pick list of locales
|
||||
|
||||
- GET ROUTE that returns all key value pairs when requested for a specific locale
|
||||
- This one is for editing purposes or for export to disk
|
||||
|
||||
- GET ROUTE that returns a list of specific key value pairs for a requested locale and specific list of locale keys provided
|
||||
- This one is for day to day ops and will be called on any client opening a new area of UI they have not previously opened
|
||||
|
||||
- PUT ROUTE that accepts a list of key value pairs and a specific locale and updates the current values in db from the provided list
|
||||
- if locale name key provided is one of our stock ones then it errors out as you can't change the stock locales
|
||||
- if locale doesn't exist in db errors out
|
||||
- biz full rights only
|
||||
|
||||
- POST ROUTE that creates a new locale duplicated from an existing locale and copies all the values from the existing locale
|
||||
- Post object {sourceLocale:"English", newLocale:"MyLocale"}
|
||||
- Errors if already exists with that name
|
||||
- Sets it to stock=false so it can be edited
|
||||
- This is also how you rename a locale
|
||||
|
||||
- DELETE ROUTE for deleting any non-stock locale
|
||||
 - Can't delete current DB default locale (specific error)
|
||||
- Users of that locale will be reset to current DB default locale
|
||||
|
||||
|
||||
|
||||
|
||||
CHANGES MADE TO KEYS FROM v7
|
||||
|
||||
- Replaced all [.Label.] with [.]
|
||||
- Replaced all ["O.] with ["] this needs to be a case sensitive change!!!
|
||||
- Removed duplicate key [WorkorderService.CloseByDate] that resulted from last change of O. (the first one between workorderservice and workorderstatus)
|
||||
- Replaced all [.ToolBar.] with [.]
|
||||
- Replaced all [.Toolbar.] with [.]
|
||||
- Replaced all [.Go.] with [.]
|
||||
- Replaced all [.Command.] with [.]
|
||||
- Removed duplicate key created by last operation: [UI.Search] (removed second longer one that refers to database)
|
||||
- Replaced all [.Error.] with [.]
|
||||
- Replaced all [.Object.] with [.]
|
||||
- Replaced all ["UI.] with ["] (and removed exact dupe keys created as a result)
|
||||
- Replaced all [.] with []
|
||||
- Removed dupe WorkorderItemOutsideService (removed the one with the longest value)
|
||||
- Replaced all ["AddressAddress"] with ["Address"]
|
||||
- Replaced all ["ContactPhoneContactPhone"] with ["ContactPhone"]
|
||||
- Replaced all ["ContactPhonePhone"] with ["ContactPhone"]
|
||||
- Replaced all ["PurchaseOrderPurchaseOrder"] with ["PurchaseOrder"]
|
||||
- Replaced all ["WorkorderItemMiscExpenseExpense"] with ["WorkorderItemMiscExpense"]
|
||||
- Replaced all ["WorkorderItemTravelTravel"] with ["WorkorderItemTravel"]
|
||||
|
||||
Note: still some dupes but...fuck it
|
||||
|
||||
|
||||
|
||||
- TODO: As I code, I will select lt keys as required enter them below
|
||||
|
||||
|
||||
|
||||
- TODO: Some of the keys are new and not translated from English, when all is done and the keys that will be carried forward are determined, check for untranslated ones
|
||||
- Use Google translate to get a rough approximation.
|
||||
- A technique to get a good translation would be to try various synonyms out and try to zero in on the commonality in translation to ensure a word is not being completely misunderstood to get a better translation
|
||||
- I.E. if different forms of the phrase result in similar words in the other language then it's probably Gucci
|
||||
|
||||
|
||||
CLIENT
|
||||
- Client fetches localized text on an as required basis form by form and caches it locally until cache is invalidated
|
||||
- Cache invalidated by either a timeout or possibly receiving a message from the server.
|
||||
- Open an edit form
|
||||
- client checks local cache (do I have the values for the list of required keys??)
|
||||
- YES: just use it
|
||||
- NO: Send a list of keys to the server along with the user id that are required for this form and get back the LT, put it in the cache
|
||||
- User id required because someone might edit their locale or the locale name and so it needs to check via the user account what the locale is
|
||||
|
||||
This way there is no wasted space at the client caching stuff that will never be used
|
||||
|
||||
CHANGES:
|
||||
 - If the text is changed at the server then a notification should occur for clients using that locale to invalidate their cache
|
||||
- Although, that would be a pretty rare event so...maybe not so much, a logout could clear the cache or a login I guess
|
||||
|
||||
|
||||
|
||||
LOCALIZED TEXT KEYS ACTUALLY USED
|
||||
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||
NewKeyValue, OldKeyValue
|
||||
------------------------
|
||||
|
||||
HelpLicense, UI.Help.License
|
||||
29
devdocs/specs/core-log-business.txt
Normal file
@@ -0,0 +1,29 @@
|
||||
Business history log
|
||||
|
||||
FROM CASE 79
|
||||
|
||||
A central event log used to track changes to business objects and events of significance in AyaNova.
|
||||
|
||||
Auto prunes (can be set)
|
||||
|
||||
Has some sort of checksum or verification so we can tell it wasn't fucked with
|
||||
|
||||
Consumed by various widgets for record history purposes
|
||||
Each object defines its own set of event IDs of significance (int enum) in addition to some events common to all objects:
|
||||
|
||||
1=created
|
||||
2=modified
|
||||
3=deleted
|
||||
|
||||
|
||||
|
||||
EVENT LOG DB SCHEMA
|
||||
------------------------------------
|
||||
AYTYPE (object type int),
|
||||
AYID (object id),
|
||||
AYEVENT (event of interest type int defined in object),
|
||||
TIMESTAMP (unix epoch),
|
||||
USERID,
|
||||
TEXTRA (text field to identify stuff that can't be retrieved from source object, i.e. deleted record name)
|
||||
|
||||
|
||||
14
devdocs/specs/core-log-security.txt
Normal file
@@ -0,0 +1,14 @@
|
||||
Security history log
|
||||
|
||||
FROM CASE 1998
|
||||
|
||||
|
||||
A central event log used to track security related events of significance in AyaNova.
|
||||
|
||||
- Authentication events (login, logoff)
|
||||
- User creation / deletion
|
||||
- User role changes
|
||||
|
||||
|
||||
Auto prunes (can be set)
|
||||
Has some sort of checksum or verification so we can tell it wasn't fucked with
|
||||
76
devdocs/specs/core-long-running-operations.txt
Normal file
@@ -0,0 +1,76 @@
|
||||
JOBS / LONG RUNNING OPERATIONS SPECS
|
||||
|
||||
|
||||
|
||||
CASES
|
||||
|
||||
|
||||
|
||||
REQUIREMENTS
|
||||
|
||||
- An endpoint to view jobs in the table
|
||||
- An endpoint to view logs of jobs
|
||||
- A db schema (see below)
|
||||
- biz object callable from other biz objects with following functionality
|
||||
- Submit jobs
|
||||
- Remove jobs
|
||||
- Remove all jobs for object type and id (called when deleting an object)
|
||||
- Update status of jobs
|
||||
- Log ops of jobs
|
||||
|
||||
|
||||
|
||||
- OPERATIONS endpoint:
|
||||
- GET JOB LIST Check a list of jobs (with rights to do so). Works even when api is locked.
|
||||
- GET /operations returns list of jobs sorted by lastActionDateTime or createdDateTime via query parameter
|
||||
- {
|
||||
"jobid":"1234",
|
||||
"createdDateTime": "2015-06-19T12-01-03.45Z",
|
||||
"lastActionDateTime": "2015-06-19T12-01-03.45Z", //<----determined from log, not stored in AOPSJOB
|
||||
"name":"Send notifications | Import data | Backup | Restore",
|
||||
"exclusive":"true/false",
|
||||
"status": "sleeping | notstarted | running | succeeded | failed",
|
||||
"log":"/operations/log/1234"
|
||||
}
|
||||
|
||||
- ROUTE: "OPERATIONS" endpoint to get log of long running operation /operations/log/1234
|
||||
- [
|
||||
{
|
||||
"DateTime": "2015-06-19T12-01-03.45Z",
|
||||
"Entry": "Imported 4 clients, 3 were duplicates and ignored"
|
||||
},
|
||||
{
|
||||
"DateTime": "2015-06-19T12-02-03.45Z",
|
||||
"FAILED to import 5 workorders - data format invalid"
|
||||
},
|
||||
{
|
||||
"DateTime": "2015-06-19T12-04-03.45Z",
|
||||
"Operation completed with errors"
|
||||
}
|
||||
]
|
||||
|
||||
- FUTURE: delete jobs that have not started yet
|
||||
- FUTURE: cancel jobs and cancellation token
|
||||
|
||||
|
||||
|
||||
SCHEMA
|
||||
=-=-=-=
|
||||
|
||||
AOPSJOB
|
||||
- jobid long NOT NULL INDEXED UNIQUE (initially I'll use linux epoch when job created, used to tag logs etc, but keeping this open for a change later)
|
||||
- OwnerId NOT NULL
|
||||
- Created NOT NULL
|
||||
- Exclusive NOT NULL bool (true=close api and don't run any other jobs, false=process async and keep api open)
|
||||
- StartAfter NOT NULL INDEXED (datetime to start the job, in cases of start now jobs the date will be minvalue)
|
||||
- jobtype enum int NOT NULL of the jobtype which is an enum of all possible job types (i.e. NotifyClosed)
|
||||
- ObjectId NULL source of job object id (i.e. workorder id)
|
||||
- ObjectType NULL source of job object type (i.e. workorder)
|
||||
- descriptive name text NOT NULL for display in UI only, isn't filtered
|
||||
- jobstatus enum int NOT NULL type (one of "sleeping | notstarted | running | succeeded | failed")
|
||||
- jobinfo text NULL (json string of extra info required for job, maybe the name of an import file or who knows what, anything really can be put in here as long as it's shortish)
|
||||
|
||||
AOPSJOBLOG
|
||||
- jobid long not null
|
||||
- created not null (indexed? Will be ordered by this a lot but it's the natural order...no?)
|
||||
- statustext NOT NULL
|
||||
40
devdocs/specs/core-notification.txt
Normal file
@@ -0,0 +1,40 @@
|
||||
# Notification specifications
|
||||
|
||||
|
||||
SCRATCHPAD IDEAS
|
||||
=-=-=-=-=-=-=-=-=-
|
||||
Hypothetical sequence of operations for designing raven notification and tie into jobs and processor
|
||||
|
||||
WidgetStatusChange notification
|
||||
- OnChange event triggered in [WIDGETBIZ] with before and after state of widget in question (immediately after save or delete or whatever is of interest)
|
||||
- OnChange processor See if any users are subscribed to this event [CENTRAL_NOTIFICATION_CLASS THIS MUST BE SUPER EFFICIENT AS IT WILL BE HAMMERED] (events of interest / core central event bus handler of some kind??)
|
||||
- CENTRAL_NOTIFICATION_CLASS event of interest should cache in memory events of interest and trigger cache invalidation by [EVENT_OF_INTEREST_CLASS] subscription change
|
||||
- If no then bail out early
|
||||
- If yes then compares the before and after state of the widget and the given list of events of interest and processes notifications in turn
|
||||
- Create a notification event in a table of pending notification events [CENTRAL_NOTIFICATION_CLASS]
|
||||
- See v7 schema for ideas
|
||||
|
||||
Deliver notifications
|
||||
- Job triggers and HandleJob is called on [CENTRAL_NOTIFICATION_CLASS] which in turn checks if any events ready to Deliver
|
||||
- Hands off NOTIFY_EVENT to deliver one at a time to a [NOTIFICATION_DELIVERY_CLASS] which in turn calls each [NOTIFICATION_DELIVERY_DELIVERY_TYPE-SMPT/POPUP/WHATEVER]
|
||||
|
||||
|
||||
Maintenance
|
||||
- [CoreJobNotificationSweeper class] maintains the notifications tables see generator spec for thoughts
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
MISC
|
||||
=-=-
|
||||
|
||||
- NOTIFICATION SUBSCRIPTIONS
|
||||
- NOTIFICATION DELIVERIES (user or all if manager account)
|
||||
|
||||
- What is "Slack"?
|
||||
- should we tie into it for notifications?
|
||||
62
devdocs/specs/core-ops-metrics.txt
Normal file
@@ -0,0 +1,62 @@
|
||||
SYSOPS METRICS / HEALTH CHECKS
|
||||
|
||||
|
||||
|
||||
|
||||
Right now as of May 7th 2018 here are the remaining outstanding issues for metrics:
|
||||
|
||||
TODO OUTSTANDING ISSUES
|
||||
|
||||
1) Need to make my own dashboard for non endpoint stats for Grafana. Actually a dashboard that covers all AyaNova would be good
|
||||
https://www.influxdata.com/blog/how-to-use-grafana-with-influxdb-to-monitor-time-series-data/
|
||||
2) Save the dashboard as JSON text for the manual
|
||||
3) See about making my own Grafana / InfluxDB container and include it in compose.yml for AyaNova server so can deploy it easily (with my own panels pre-built)
|
||||
4) DOCUMENT
|
||||
5) Skim below and see if I have covered it all.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
OLD OLD OLD OLD
|
||||
This is old stuff I was using during research and initial implementation some of it may still be relevant later
|
||||
=-=-=-=-=-=-=-=-=-
|
||||
|
||||
APRIL 26 2018 - DID SOME RESEARCH, THIS IS ACTUALLY A VERY COMPLEX TOPIC AND BEST HANDLED WITH A 3RD PARTY TOOL
|
||||
- There is an open source metrics tool and an open source db it can work with that is a time series data store (influxdb, elasticsearch) designed for exactly this scenario
|
||||
- Influxdb has a docker container available
|
||||
- Shitty thing is I would need some of this information for support purposes built in, not requiring some fancy 3rd party tools which are very cool for a large setup,
|
||||
but a small one man show doesn't require that.
|
||||
- Perhaps RAVEN can have a big corporate edition that is all intended to be containerized and comes with influxdb and preconfigured with metrics on.
|
||||
- It handles both metrics and "HEALTH CHECK" issues in one package
|
||||
|
||||
- I'm not sure if this is a v1.0 feature, though it would help in development to see what's what route writes
|
||||
- If it can be an optional thing that can be turned on then that would be ideal
|
||||
- https://al-hardy.blog/2017/04/28/asp-net-core-monitoring-with-influxdb-grafana/
|
||||
- https://www.app-metrics.io/
|
||||
|
||||
|
||||
|
||||
Ops Metrics
|
||||
|
||||
- CASE 3502 Add metrics
|
||||
- CASE 3502 Metric: record count in each table or at least major ones as a snapshot metric so can compare month to month.
|
||||
- CASE 3497 ACTIVE user count - Log user login, last login and login per X period
|
||||
- CASE 3499 "Slow" I want to know if anything is slow, not what the user says but what the code determines
|
||||
|
||||
|
||||
|
||||
|
||||
- some kind of internal metrics to track changes over time in operations with thresholds to trigger logs maybe?
|
||||
- Has to be super fast, maybe an internal counter / cache in memory and a periodic job that writes it out to DB, i.e. don't write to db metrics on every get operation etc
|
||||
- Average response time?
|
||||
- Busyness / unique logins or tokens in use? A way to see how many distinct users are connecting over a period of time so we know how utilized it is?
|
||||
- Utilization?
|
||||
- Areas / routes used in AyaNova and how often / frequently they are used (we could use this for feature utilization)
|
||||
- CPU peak usage snapshot
|
||||
- Disk space change over time snapshots
|
||||
|
||||
|
||||
HEALTH CHECKS
|
||||
- Comes with appmetrics:
|
||||
- https://al-hardy.blog/2017/04/17/asp-net-core-health-checking/
|
||||
83
devdocs/specs/core-ops-support-info-log.txt
Normal file
@@ -0,0 +1,83 @@
|
||||
SYSOPS HEALTH CHECK / METRICS
|
||||
|
||||
OK, considered this and a log is a log and all logs are relevant to sysops people so I'm going to treat all logging the same regardless and make an effort to ensure each log entry
|
||||
is tagged with the relevant class name
|
||||
|
||||
CRITICAL ISSUES
|
||||
- Check for critical issues in a health check periodic job which also logs and metrics
|
||||
- Critical issues should be logged first then sent via notification for system operators if subscribed
|
||||
-
|
||||
|
||||
METRICS
|
||||
- metrics should be gathered in DB and reported on via UI for ops users and potentially in other formats down the road
|
||||
|
||||
|
||||
|
||||
TODO LIST OF THINGS CODED THAT NEED TO BE LOGGED
|
||||
- Items in code tagged with this:
|
||||
- //TODO: core-log-sysop
|
||||
- Generator failures
|
||||
- IJobBiz derived objects failures
|
||||
|
||||
- configuration changes ???
|
||||
- Install and uninstall feature changes
|
||||
- Warnings (low disk space, slowness monitoring, db issues) (during health check JOB??)
|
||||
|
||||
|
||||
"HEALTH CHECK" JOB
|
||||
- things that need to be metricized are commented with //OPSMETRIC
|
||||
- Maybe a "health check" job or "checkup" job that periodically asseses things and reports findings
|
||||
- works in conjunction with metrics gathered maybe?
|
||||
- Metrics would be a system that for example could get free disk space then get it again a few days later and project ahead to getting low and warning or simple when down to 10% warn or etc
|
||||
- Anything we'd like to see from a support point of view would be useful too
|
||||
- Go over the research doc to see what was recommended
|
||||
- Dig up that guys example project on his blog that he was going to add metrics to.
|
||||
- Brainstorm a list of recent support issues and what could be a benefit in dealing with them
|
||||
- "Slowness" comes up a lot.
|
||||
|
||||
|
||||
Ops Metrics
|
||||
CONFIRMED REQUIRED
|
||||
- Gather in memory and flush to db on a schedule is best
|
||||
- CASE 3562 If found, count of mismatch of attached files in database vs file system
|
||||
- CASE 3523 Log major ops related configuration changes (before and after snapshot)
|
||||
- CASE 3502 Log feature or route or endpoint usage count as a snapshot metric so can compare month to month.
|
||||
- CASE 3502 Log record count in each table or at least major ones as a snapshot metric so can compare month to month.
|
||||
- CASE 3497 ACTIVE user count - Log user login, last login and login per X period
|
||||
- CASE 3499 "Slow" I want to know if anything is slow, not what the user says but what the code determines
|
||||
|
||||
RESEARCH / IDEAS / EXAMPLES
|
||||
- Metric types:
|
||||
- https://www.app-metrics.io/getting-started/metric-types/
|
||||
- Code example that deals with this issue:
|
||||
- https://github.com/AppMetrics/AppMetrics/tree/dev/src/App.Metrics.Core
|
||||
- Need more than one window into the data, for example we need a last few minutes (5?) view so people can see at a glance what is happening NOW
|
||||
- But also need to know what was it historically. So maybe we need a NOW algorithm but also a HISTORICAL algorithm.
|
||||
- Maybe a sliding scale of recency, so a 5 minute view, a THIS WEEK view and then a month to month view beyond that??
|
||||
- LIBRARIES
|
||||
 - Health check Health Checks give you the ability to monitor the health of your application by writing small tests which return either a healthy, degraded or unhealthy result.
|
||||
- https://www.app-metrics.io/health-checks/
|
||||
- APP METRICS
|
||||
- https://github.com/AppMetrics/AppMetrics
|
||||
- Different types of metrics are Gauges, Counters, Meters, Histograms and Timers and Application Performance Indexes
|
||||
- METRICS of a system:
|
||||
- Network. Network metrics are related to network bandwidth usage.
|
||||
- System. System metrics are related to processor, memory, disk I/O, and network I/O.
|
||||
- Platform. Platform metrics are related to ASP.NET, and the .NET common language runtime (CLR).
|
||||
- Application. Application metrics include custom performance counters "Application Instrumentation".
|
||||
- Service level. Service level metrics are related to your application, such as orders per second and searches per second.
|
||||
- USEFUL INFO HERE FOR SYSTEM METRICS LIKE MEMORY ETC: This document from Microsoft gives generally accepted limits for things like CPU threshold, memory etc in actual percentages
|
||||
- Section "System Resources" here https://msdn.microsoft.com/en-us/library/ff647791.aspx#scalenetchapt15_topic5
|
||||
|
||||
- USEFUL EXAMPLE dashboard for web applications:
|
||||
- https://sandbox.stackify.com/Stacks/WebApps
|
||||
|
||||
|
||||
- some kind of internal metrics to track changes over time in operations with thresholds to trigger logs maybe?
|
||||
- Has to be super fast, maybe an internal counter / cache in memory and a periodic job that writes it out to DB, i.e. don't write to db metrics on every get operation etc
|
||||
- Average response time?
|
||||
- Busyness / unique logins or tokens in use? A way to see how many distinct users are connecting over a period of time so we know how utilized it is?
|
||||
- Utilization?
|
||||
- Areas / routes used in AyaNova and how often / frequently they are used (we could use this for feature utilization)
|
||||
- CPU peak usage snapshot
|
||||
- Disk space change over time snapshots
|
||||
27
devdocs/specs/core-regions.txt
Normal file
@@ -0,0 +1,27 @@
|
||||
REGION SPECS
|
||||
|
||||
|
||||
|
||||
CASES
|
||||
|
||||
REGIONS:CLIENT:NOTIFICATION:GENERAL: - REGION feature changes case
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/3454
|
||||
|
||||
REGIONS:GENERAL: - denormalize region id in each regionalized object
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/3476
|
||||
|
||||
QUOTE:NOTIFICATION:CR: - Notifications for quote creation should be regionalized (or tags?)
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/3564
|
||||
|
||||
|
||||
|
||||
|
||||
REQUIREMENTS
|
||||
|
||||
|
||||
Definitive decision on regions:
|
||||
Regions will be deprecated as a feature entirely, no region feature will be in RAVEN / v8.
|
||||
Old data will import regions as tags, first the region will be imported as a tag then anything that is imported of that region will be tagged with that region.
|
||||
Notifications will work via tags instead, users will be able to filter a notification to anything tagged with that tag.
|
||||
Essentially being able to filter out a user from seeing data outside their region is not going to be a feature going forward.
|
||||
Roles will be the primary way to restrict what users see and various filters by tag for certain ops like notification etc.
|
||||
11
devdocs/specs/core-reporting.txt
Normal file
@@ -0,0 +1,11 @@
|
||||
REPORTING SPECS
|
||||
|
||||
CASES
|
||||
|
||||
|
||||
|
||||
REQUIREMENTS
|
||||
|
||||
- All v7 reports ported to RAVEN
|
||||
- ALL Fields even the ones that don't show on the report but are available for adding to a report in the editor need to be available
|
||||
|
||||
72
devdocs/specs/core-roles.txt
Normal file
@@ -0,0 +1,72 @@
|
||||
# Roles specifications
|
||||
|
||||
From case https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/1809
|
||||
|
||||
RAVEN will replace security rights system of v7 with a role based system instead
|
||||
I'm using an int flags enum which means a maximum of 32 possible roles unless I bump it up to a long but don't really want to as this number will be thrown around the api a lot
|
||||
|
||||
|
||||
|
||||
TODO: Fill this out as I code.
|
||||
|
||||
**DELETE RIGHTS***
|
||||
If you can modify an object you can delete an object
|
||||
|
||||
|
||||
**OWNER LIMITED ROLES**
|
||||
Limited roles in some cases can create an object but can only edit or delete objects they created
|
||||
|
||||
## ROLES
|
||||
|
||||
### None
|
||||
No rights, not settable, just for internal usage in code
|
||||
|
||||
### BizAdminLimited
|
||||
Intended for a business administrator / supervisor who wants to monitor the business, kpi, reporting etc, but doesn't actually get to change anything.
|
||||
Suitable for the "big boss" who isn't trusted to make actual day to day decisions but can review anything.
|
||||
|
||||
**RIGHTS**
|
||||
- Read only access to everything (except OPS stuff)
|
||||
- Full access to management reporting, KPI etc, but can't change them substantially, just sort, filter etc.
|
||||
|
||||
|
||||
### BizAdminFull
|
||||
|
||||
Basically the v7 manager account stuff with full rights to everything other than OpsAdmin stuff.
|
||||
|
||||
**RIGHTS**
|
||||
- Full access to all AyaNova objects with the sole exception of OPS related stuff
|
||||
- Grants roles to other users
|
||||
- Licensing
|
||||
- Business related configuration settings
|
||||
- All management and KPI stuff
|
||||
|
||||
### DispatchLimited
|
||||
|
||||
### DispatchFull
|
||||
|
||||
### InventoryLimited
|
||||
|
||||
### InventoryFull
|
||||
|
||||
### Accounting
|
||||
|
||||
### TechLimited
|
||||
|
||||
### TechFull
|
||||
|
||||
### SubContractorLimited
|
||||
|
||||
### SubContractorFull
|
||||
|
||||
### ClientLimited
|
||||
|
||||
### ClientFull
|
||||
|
||||
### OpsAdminLimited
|
||||
|
||||
### OpsAdminFull
|
||||
backup, troubleshoot, dashboard of throughput, db administration, all the stuff needed to keep RAVEN up and running and monitor any issues in operations of it, nothing to do with business stuff or actual business data
|
||||
|
||||
|
||||
|
||||
99
devdocs/specs/core-search.txt
Normal file
@@ -0,0 +1,99 @@
|
||||
SEARCH SPECS
|
||||
|
||||
|
||||
|
||||
CASES
|
||||
|
||||
SEARCH: - to have ability to filter by client
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/1503
|
||||
|
||||
SEARCH:UI:GENERAL: - Search in V8: should be integrated, not a separate form
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/1672
|
||||
|
||||
SEARCH:NEW: - Search dictionary: Auto remove orphaned words
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/1878
|
||||
|
||||
WORKORDER:PARTS:SEARCH:CR:DUPLICATE 3358: - Parts Search on Parts Grid
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/3310
|
||||
|
||||
WORKORDER:PARTS:SEARCH:CR:DUPLICATE 3310: - Ability to ‘search’ for a part while in a WO or PM
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/3358
|
||||
|
||||
UI:WORKORDER:CR: - Have a search box for clients instead of having to use the slider to find a client
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/3376
|
||||
|
||||
SEARCH:UI:GENERAL: - Be able to search from anywhere in any screen
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/3459
|
||||
|
||||
SEARCH:WORKORDER:PO:QUOTE:PM: - Workorder and other numbered items need to be searchable by their number
|
||||
https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/3506
|
||||
|
||||
|
||||
|
||||
REQUIREMENTS
|
||||
|
||||
- USE-CASE: Central text search for any match. Can include tags. Can specify a type of object.
|
||||
- USE-CASE: In-object text search for the typeandid that user is in, e.g. when in Client info form can search on that client.
|
||||
- USE-CASE: Picklist / chooser Search for text of a specific type of object, e.g. find all Clients that contain "squirrel" to drive picklists everywhere
|
||||
- USE-CASE: No snippet (excerpts) just name and type. I think for the initial release I'm *not* going to include snippets with result for several reasons:
|
||||
- Performance: better to get an immediate result than to wait a while to get excerpts that
|
||||
- Bandwidth: will trigger a lot of probably unneeded Bandwidth and chew up cycles on the db and server
|
||||
- Can be added later based on feedback (i.e. a lot of bitching)
|
||||
- For performance it would be something that runs *after* the search results are returned, either on demand (user clicks on excerpt button, or slowly fills in the list async)
|
||||
|
||||
- NAME: in search index include a bool indicating the word is actually part of the name or equivalent of the object, this will make name searches WAAAAYYY easier!!!
|
||||
- in non named objects it's whatever the primary identifier is, i.e. workorder number for a workorder, partnumber for a part
|
||||
- Maybe all objects should have a "name" column in db be it a workorder number or part number just for consistency
|
||||
- TAGS: Any search, anywhere, can include tags (internally it will post filter on tags after initial text search or if no text then will just search tags)
|
||||
|
||||
- ROLES: Needs to internally be able to filter by CANREAD property of role user is in (e.g. if no rights to read client no clients returned)
|
||||
- INDEX VISIBLE ID NUMBERS MUST be searchable so ensure they get put into text search with the regular text.
|
||||
- OBJECT ID and/or OBJECT TYPE criteria support (AyaTypeId must be included in search index)
|
||||
- PARENT / OPENABLE OBJECT: Objects that are searchable but not openable directly need to contain their parent type and id, this way we can follow a chain to the openable object if necessary
|
||||
- This is in the object, not in the searchkey as it would be inefficient
|
||||
- Need parent AyaType as an ENUM ATTRIBUTE in the AyaType table for easy traversal
|
||||
- CLEANUP: Generator able to cleanup index with no matching word (if possible), index with no matching typeandid
|
||||
- CJK INDEX support: same as v7
|
||||
- GROUP BY: group by objectype then objectid (then created date?)
|
||||
- Coding: break this into separate discrete classes, the old v7 code is very monolithic and inelegant
|
||||
- SAMPLE DATA: Need a huge amount of sample data indexed to load test it
|
||||
- INDEXES: play with it and see what works best
|
||||
|
||||
|
||||
|
||||
PROPOSED SCHEMA
|
||||
asearchdictionary
|
||||
- id (long)
|
||||
- word (nvarchar 255) (indexed?)
|
||||
|
||||
asearchkey
|
||||
- id (long)
|
||||
- wordid (fk on asearchdictionary.id)
|
||||
- objectid (long id of source object)
|
||||
- objecttype (AyaType as int of source object)
|
||||
- inname (bool indicates the search word was in the name of the object)
|
||||
|
||||
|
||||
|
||||
|
||||
REFERENCE INFO
|
||||
|
||||
V7 Code:
|
||||
- Word breaker: AyaBizUtils -> Break starting at line 1976
|
||||
- Insert into db: DBUtil.cs -> ProcessKeywords starting at line 423
|
||||
- Usage: Client.cs line 2104
|
||||
- SearchResultList.cs (whole class, also there is an "ri" version for some reason I forget)
|
||||
- V7 DB Schema:
|
||||
|
||||
CREATE TABLE [dbo].[ASEARCHDICTIONARY](
|
||||
[AID] [uniqueidentifier] NOT NULL,
|
||||
[AWORD] [nvarchar](255) NOT NULL
|
||||
) ON [PRIMARY]
|
||||
|
||||
|
||||
CREATE TABLE [dbo].[ASEARCHKEY](
|
||||
[AWORDID] [uniqueidentifier] NOT NULL,
|
||||
[ASOURCEOBJECTID] [uniqueidentifier] NOT NULL,
|
||||
[ASOURCEOBJECTTYPE] [smallint] NOT NULL
|
||||
) ON [PRIMARY]
|
||||
|
||||
6
devdocs/specs/core-seeds.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
SEEDS
|
||||
EXPORT READY (DATA DUMP CODED)
|
||||
|
||||
REQUIREMENTS
|
||||
See case 3544
|
||||
|
||||
30
devdocs/specs/core-server-state.txt
Normal file
@@ -0,0 +1,30 @@
|
||||
SERVER STATE SPECS
|
||||
|
||||
REQUIREMENTS
|
||||
|
||||
Two parallel paths that can lead to serverstate affecting access to server:
|
||||
|
||||
Closed or Open States
|
||||
- If closed all routes are shut to all users with case by case exceptions:
|
||||
- OPS type user exceptions:
|
||||
- Login
|
||||
- View long running jobs because server may be closed due to long running process they need to view for status updates
|
||||
- they can fetch a license key or look at license current state (ONLY IF CLOSED DUE TO LICENSE ISSUE)
|
||||
- View METRICS and log files
|
||||
|
||||
|
||||
- SYSTEM_LOCK
|
||||
- An independent setting outside of the regular server state that allows RAVEN to internally lock itself when License or lockout related issues like non-payment
|
||||
- Acts as though the server was set to CLOSED so OPS can still go in but doesn't matter what state they set it to because the locked is a parallel in memory internal only setting
|
||||
- All non-ops routes will need to see if closed and server state returns closed both if serverstate is closed or if SYSTEM_LOCK
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Some scenarios at the server require general users to be LOCKED OUT COMPLETELY but still grant limited access for operations administration, such as:
|
||||
- Biz changes that need all users out such as a re-org of client data or something else business related
|
||||
- Ops changes that need all users out such as upgrades, backup, restore, any mass data change such as import or export
|
||||
- Emergency security issues such as hacking attempt etc
|
||||
|
||||
Need a method to inform users that server *WILL* be going down within a set time frame, like 15 minutes (no need to get fancy and make it configurable)
|
||||
74
devdocs/specs/core-tags.txt
Normal file
@@ -0,0 +1,74 @@
|
||||
# TAGS specifications
|
||||
|
||||
Main case is 3373 https://rockfish.ayanova.com/default.htm#!/rfcaseEdit/3373
|
||||
|
||||
FORMAT
|
||||
=-=-=-
|
||||
Copied from stack overflow
|
||||
tags ...
|
||||
|
||||
must be no longer than 35 characters
|
||||
spaces are replaced by dashes, no spaces in a tag
|
||||
always converts to lower invariant culture
|
||||
- (probably not this, utf-8 ok: must use the ascii character set a-z 0-9 + # - .)
|
||||
|
||||
|
||||
SCHEMA
|
||||
=-=-=-
|
||||
Two tables:
|
||||
|
||||
TAGS
|
||||
- name text not null lowercase only
|
||||
- id bigserial
|
||||
- OwnerId
|
||||
- Created
|
||||
TAGMAP
|
||||
- ObjectId
|
||||
- ObjectType
|
||||
- TagId
|
||||
|
||||
INDEXES
|
||||
- Initial index is on the name of the tag as that will be searched for??
|
||||
- After some research I think it's best to just make it, populate it with a great deal of test data and then test query it and see what indexes give the best performance
|
||||
|
||||
|
||||
|
||||
USE-CASES
|
||||
Add a tag to an object type and id, start typing and a selection list fills in to select from
|
||||
- Don't allow the same tag more than once
|
||||
- Create a tag if not present (rights?)
|
||||
Show tags on an object
|
||||
Find any type or specific type items by a set of tags to include and a set of tags to exclude (i.e. has "red, yellow" but not "green")
|
||||
Search for text must allow specifying tags to refine
|
||||
Reporting will require filtering sources of report data by tags
|
||||
|
||||
METHODS REQUIRED IN TAG CONTROLLER
|
||||
|
||||
- GET tag text by id
|
||||
- GET tag id by tag text
|
||||
- CREATE tag for tagging something
|
||||
- REMOVE tag (and all tagmap entities)
|
||||
|
||||
|
||||
- GET TAGMAPS LIST (get all object/id entities with this tag)
|
||||
- CREATE TAGMAP Apply tag to an object / id
|
||||
- REMOVE TAGMAP remove tag from object/id
|
||||
- GET TAGS for object (name id list, main display route)
|
||||
|
||||
|
||||
ROLES / RIGHTS
|
||||
- Limited roles can tag stuff and remove tags as per their rights to the object type in question but can't make new tags or change existing tags
|
||||
- Full roles can make new tags and can edit or delete existing tags
|
||||
|
||||
|
||||
RETRIEVAL
|
||||
|
||||
Will need to query tags as follows:
|
||||
|
||||
ObjectType and Id list of tags (most common)
|
||||
Objects with one or more tags
|
||||
Objects that have a set of tags but do not have another set of tags
|
||||
Objects of a certain type but any id that have a certain tag
|
||||
|
||||
|
||||
|
||||
15
devdocs/specs/core-testing.txt
Normal file
@@ -0,0 +1,15 @@
|
||||
TESTING
|
||||
|
||||
BACK-END
|
||||
- Back end api is currently tested via the api and unit tests that exercise its routes
|
||||
TODO
|
||||
- Some kind of continual load test of the DO server, try to break it with a lot of tests continually running for a long period of time
|
||||
- A test that simulates multiple simultaneous users at the same time
|
||||
|
||||
|
||||
|
||||
FRONT-END
|
||||
TODO
|
||||
- Integration tests that exercise the front end and ensure things appear where they are supposed to given certain tasks
|
||||
- Ideally it would be able to run a set of business tasks against the UI and confirm at each point, like make a random customer and then a workorder and so on
|
||||
- Something that we can leave running for a long period of time to verify load and no leaks
|
||||
36
devdocs/specs/core-trial-evaluation-system.txt
Normal file
@@ -0,0 +1,36 @@
|
||||
Trial and acquire features
|
||||
|
||||
Raven will no longer be a download and try anonymously application.
|
||||
There are only two ways for a non customer to trial: immediately online or self host. Both methods require the user to register to trial and the key will be automatically fulfilled.
|
||||
In no case will a user be able to install a trial key without the database being erased to prevent serial trial scamming.
|
||||
|
||||
LICENSED CUSTOMER ADD-ON TRIAL
|
||||
Users that are already licensed and just want to try out a feature will get a normal licensed key with the feature licensed as normal but with short expiry like 30 days.
|
||||
Once the feature licensed expires it's not offered in the UI anymore (or it says "A license is required to use this feature")
|
||||
This way we do not need to replace the key again with a non-licensed version of the trialed add-on.
|
||||
|
||||
PROSPECT ONLINE FULL TRIAL
|
||||
Anyone visiting our website who is a prospective customer will be able to trial AyaNova immediately by filling out a form after which a new AyaNova will be spun up with a fixed time limit with full license registered to them but as a trial.
|
||||
Inside the trial they will be able to seed data for various scenarios at will
|
||||
They can purchase at any time and we will activate it into the online version already set up so they can just start working
|
||||
When the time limit expires plus let's say another month the database will be automatically erased and server spun down (container deleted from docker?)
|
||||
|
||||
|
||||
PROSPECT SELF HOST FULL TRIAL
|
||||
|
||||
When a prospect user wants to trial self host, they will be able to download and install and it will start in unlicensed mode where they can verify it's installed and ready but there is no license so it won't allow normal ops,
|
||||
The only options will be to confirm it's working properly and to request a trial key which will be sent to Rockfish automatically, generate a key automatically and install automatically.
|
||||
When a trial key is first installed the database is automatically erased thus preventing them from just endlessly requesting trial keys to keep using it for free
|
||||
user can during trial pick different seed data for various scenarios at will
|
||||
They can install a fully registered key at any time by purchasing from within AyaNova (at first may need to be via current shareit system)
|
||||
|
||||
|
||||
FEATURES REQUIRED TO SUPPORT THIS
|
||||
|
||||
- We need a management console to view the load on our server and to alert us to slowdowns so we can expand the virtual server, also we need to be able to do it in more than one datacenter as we would want local endpoints for users ultimately
|
||||
- RockFish or some other application needs to be able to spin up a new server and db combo that is unique (docker container?) automatically and prune or get rid of it completely after XX days after trial has expired and it is still unlicensed
|
||||
- Rockfish automatic trial key fulfillment and delivery and installation
|
||||
- RAVEN built in form to request trial key and automatic fulfillment and install including db is erased any time a trial key is installed when a user already has a trial or no key installed
|
||||
- RAVEN built in purchase feature
|
||||
|
||||
|
||||
50
devdocs/specs/core-ui-design.txt
Normal file
@@ -0,0 +1,50 @@
|
||||
UI DESIGN DOC
|
||||
|
||||
Requirements
|
||||
- Responsive but favoring larger screens primarily
|
||||
- Smaller screens will be able to do everything but the layout is not the primary one
|
||||
- Service techs have an ipad or notebook
|
||||
- Anticipate a list of things each role needs immediately in front of them and try for that
|
||||
- componentized UI elements so can re-use and mix and even support customizing with user defined layout
|
||||
- So for example a customer list is a self contained subset of a list widget and can be plunked down anywhere required
|
||||
- This will save me time so identify the "types" of ui elements (picklist, filterable data list, entry form etc)
|
||||
- Then can build specific versions of the types identified like a client list is a specialized filterable data list etc.
|
||||
- If a list then it also knows which element is selected or a list of selected elements so other widgets can operate on it
|
||||
- A menu or command widget that can be inserted into another widget, i.e. a part picker for a workorder part list widget or elsewhere a part is required
|
||||
- This way I'm only making a few UI objects, not a new one for every single element.
|
||||
- Kind of making the pallet first of all required objects then I can "paint" with them on to the UI without getting bogged down in minute details every time I make a form
|
||||
- UI elements should be responsive and generic enough to work for many different use cases
|
||||
- they should be rights aware and mode aware (editable, read only) enough to handle all that without recoding again and again
|
||||
- the page or shell or whatever that holds the widgets should be end user customizable from a widget pallet.
|
||||
- The more self contained the widget the more useful
|
||||
|
||||
- Most people seem to prefer WBI over RI and the reason always seems to be RI is too simple or requires too many clicks
|
||||
- So plan on the bigger screen layout being the main UI and smaller screen secondary
|
||||
- I want things to be simpler and cleaner than it seems many people do so beware of that tendency
|
||||
- People don't want to have to open sub screens any more than absolutely necessary
|
||||
- Make sure a screen contains as much as possible to complete it on one screen
|
||||
- Clean interface with good negative space but not dumbed down too much
|
||||
- Pro-marketing style, stuff that makes it easier to sell
|
||||
- emphasize simple fonts with good contrast
|
||||
- Blue is a good colour, no purple or pastels
|
||||
- Dashboard / customizable UI
|
||||
- Ideally people can see as much detail as they want or remove unused ui widget elements
|
||||
- So, for example on the dashboard they can customize by plunking down a client list widget, adding a "My workorders" widget for a tech
|
||||
- or for a Ops person they can plunk down on their dashboard a current server status widget or active jobs widget etc
|
||||
|
||||
|
||||
Graphics and themes for AyaNova
|
||||
- No bitmap graphics, vector only!!
|
||||
- I like material design but it will remain to be seen for the front end.
|
||||
- For the manual and docs will use material theme with MKDOCS generator.
|
||||
|
||||
|
||||
LOGO
|
||||
- Need to standardize on a logo and stick to it from now on
|
||||
- Simple and clean (script AyaNova used for RI is maybe out)
|
||||
- Single colour canucks blue with green contrast if absolutely necessary (don't make it necessary)
|
||||
|
||||
COLOURS
|
||||
- Canucks colours of course, Blue primary and green secondary. RI already uses them, get the hex codes there.
|
||||
- No indigo or pastels
|
||||
|
||||
14
devdocs/specs/hosting.txt
Normal file
@@ -0,0 +1,14 @@
|
||||
HOSTING
|
||||
|
||||
We will be hosting users in rental mode and for trialling, we need systems in place to support that automatically, see core-trial-evaluation doc for more details
|
||||
|
||||
- We need a management console to view the load on our server and to alert us to slowdowns so we can expand the virtual server, also we need to be able to do it in more than one datacenter as we would want local endpoints for users ultimately
|
||||
- We need to be able to tell if a specific AyaNova server is consuming disproportionate resources.
|
||||
- We need some kind of way to cap the load on a server automatically so they can't just have thousands of clients attempting to connect at once (i.e. they should self host if they are over a certain size / bandwidth usage)
|
||||
- RockFish or some other application needs to be able to spin up a new server and db combo that is unique (docker container?) automatically and prune or get rid of it completely after XX days after trial has expired and it is still unlicensed
|
||||
- Rockfish automatic trial key fulfillment and delivery and installation
|
||||
- RAVEN built in form to request trial key and automatic fulfillment and install including db is erased any time a trial key is installed when a user already has a trial or no key installed
|
||||
- RAVEN built in purchase feature
|
||||
- Rockfish built in ability to work with built in purchase to license a user and for RAVEN to check in with for new license info (i.e. monthly rental charge and license fulfillment)
|
||||
|
||||
|
||||
BIN
devdocs/specs/joyce-planning-docs-used/1_Roles.odt
Normal file
BIN
devdocs/specs/joyce-planning-docs-used/Workorder.odt
Normal file
269
devdocs/specs/marketing-sales-planning.txt
Normal file
@@ -0,0 +1,269 @@
|
||||
MARKETING AND SALES IDEAS
|
||||
|
||||
|
||||
We will have two markets:
|
||||
|
||||
SAAS rental market
|
||||
- User pays monthly fee to use RAVEN on our servers
|
||||
- Pricing will be a factor of how many scheduleable users, accounting add-on and some kind of protection for us relating to bandwidth and number of simultaneous users
|
||||
- Taking into account support and upkeep costs, hosting costs etc
|
||||
- One flat price per month, as long as they keep paying they keep using and will get automatically updated and support included
|
||||
- When they stop paying they can no longer use it but we won't delete the data immediately but give them time and warnings before we do
|
||||
- User has the option to switch to a perpetual license by buying one in which case they can then run it self hosted or elsewhere
|
||||
- This is important to marketing because it removes one of the negative preconceptions around risk using an online service (vendor bankruptcy kind of issues)
|
||||
|
||||
Perpetual market with maintenance subscriptions
|
||||
- User buys a perpetual license for the software itself
|
||||
- specifically this means they buy service tech licenses to add in single or groups with discount per group (to be determined based on what makes us the most money and target market)
|
||||
- User pays for support and updates subscription either monthly, yearly or 3 yearly with increasing discounts for each
|
||||
- User can update at any time while active
|
||||
|
||||
|
||||
|
||||
WHAT WE ARE SELLING
|
||||
|
||||
SAAS RENTAL
|
||||
The ability to use the software for month to month priced by number of techs, accounting addon and whatever we deem to be the fair price to us for server hosting plus bandwidth overages
|
||||
- ideally charge them for a 20 dollar server but use a 10 dollar server kind of thing
|
||||
|
||||
Perpetual:
|
||||
AyaNova service technician licenses by count with sliding discount for volume purchases at time of purchase
|
||||
Accounting add-on
|
||||
Maintenance subscription for support and updates
|
||||
- First purchase includes one year of support and updates (or maybe a shorter time frame at a lower price? Have to research that to see what gets the most revenue)
|
||||
- Subsequent years are at a rate based on whether they have the accounting add-on (one flat price) and so many dollars per service tech but sliding scale so costs less the more techs
|
||||
|
||||
PHRASING
|
||||
Marketing will sell the licenses with the phrase "service technician" or whatever the most universal equivalent is with a blurb at the top
|
||||
explaining that a "service Tech" just means anything you schedule so could be busses or etc, but then forever afterwards just say service tech as it's less confusing
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
WORKSHEET PLANNING STUFF FOLLOWS:
|
||||
|
||||
|
||||
POTENTIAL NEW LICENSING SYSTEMS
|
||||
|
||||
What we need:
|
||||
- Absolutely need recurring revenue, no two ways about that, it's our bread and butter
|
||||
- Ease of administration and ease of billing procedures
|
||||
- Automated system as much as possible for both us and the client
|
||||
|
||||
What our customers need
|
||||
- Clear and easily explainable costs
|
||||
- Ease of purchase and control over billing system
|
||||
- Easy to upgrade or downgrade the level of licenses and options
|
||||
- A reason to keep paying and not simply cancel a subscription when it suits them
|
||||
|
||||
|
||||
|
||||
SUBSCRIPTION / RENTAL / SAAS LICENSE
|
||||
- Users pay a subscription fee to be able to use the software
|
||||
- Includes support and updates as long as their subscription is active
|
||||
- Software does not work without a subscription at all, they are renting the software
|
||||
- Quote from a link below: Prices for annual subscriptions are generally some fraction of the perpetual license alternative.
|
||||
Many companies aim for a crossover point of 4 to 5 years after which the costs for the annual license begin to exceed the perpetual license fee plus the annual support costs.
|
||||
So, an annual license might be priced at 40% of a perpetual license.
|
||||
- Not sure about that math
|
||||
|
||||
|
||||
SAAS PRICING MODELS COMMONLY USED
|
||||
|
||||
FLAT RATE
|
||||
- Same flat price for everything for everybody, i.e. 300 a month all in
|
||||
- PROS: Easy to sell, market and explain
|
||||
- CONS: rarely used less precedent, harder to extract value from different market segments, you get one basically, no flexibility for the customer
|
||||
|
||||
|
||||
USAGE BASED
|
||||
- Charged based on actual usage, some metric from the software, perhaps number of records or speed of access or space consumed or something
|
||||
- Workorders per month or scheduled items per month would be a good metric for us
|
||||
- Often used for scenarios where someone is hosting something and paying for the costs to host it, i.e. if we were hosting for people
|
||||
- PROS: price scales with use, reduces barriers to use, scales for heavy users compensating for extra costs
|
||||
- CONS: rarely used less precedent, harder to predict revenue, harder to predict costs for the end user,
|
||||
|
||||
CAPACITY / INFRASTRUCTURE *
|
||||
- Based on something that represents the size of the company
|
||||
- We sort of do this now with the price per scheduled user tiers
|
||||
- Normally it's done with some kind of hardware or other consideration such as the size of the server in use based on the number of CPU cores on the machine
|
||||
- Idea is it's cheap for a small company and more for a larger company
|
||||
|
||||
TIERED PRICING *
|
||||
- Multiple packages at different price Points (MOSTLY THIS IS ABOUT VOLUME, NOT FEATURE DIFFERENCES SPECIFICALLY)
|
||||
- Usually Small, medium and large (3.5 options is the average)
|
||||
- Basic, Pro, Enterprise
|
||||
- Targets a customer type for each tier trying to take into effect the needs at each tier with an appropriate level of service
|
||||
|
||||
- PROS:
|
||||
- Commonly used pricing model so easier to market and for people to understand and compare due to familiarity
|
||||
- appeals to multiple buyer personas better chance of a sale
|
||||
- maximize revenue (offering a single $100 package will overcharge users with a $10 willingness to pay, and undercharge users willing to spend $200.)
|
||||
- clear upselling route
|
||||
- CONS:
|
||||
- Potentially confusing (Do not have too many tiers, maybe 3 with an out for excessive costs to us like too much bandwidth or too many support requests)
|
||||
- Appeals to too many people (don't try to have a tier for every segment)
|
||||
- Heavy user risk (if top people exceed expected costs no room to move them up [This would be solved by having some kind of cap and overage charges I guess])
|
||||
|
||||
|
||||
PER USER PRICING
|
||||
- Exactly what it says, per POTENTIAL user of the software. 1 user one price, 2 users double the price 3 treble etc etc.
|
||||
- Paid monthly or annually (discount of 12% for example)
|
||||
- PROS: Simple, revenue scales with users, predictable revenue
|
||||
- CONS: Hard to track, limits adoption as it can get expensive and so people might hold off too many users which increases churn due to less users tied to product, rewards cheating, doesn't reflect real value; company doesn't see the difference from one user to 5 for them
|
||||
|
||||
|
||||
PER ACTIVE USER PRICING
|
||||
- Only charge per active user, don't pay for accounts that are unused
|
||||
- Often targeted at Enterprise level customers
|
||||
- Slack is a famous example
|
||||
- PROS: Customers only pay for what they actually use so reduces their risk of widespread adoption as it won't cost any more if they end up not using it
|
||||
- CONS: Not much value to a smaller business so extra incentive, not ideal for us because it means we don't have defined recurring revenue just like the USAGE BASED model
|
||||
|
||||
|
||||
PER FEATURE PRICING
|
||||
- "Features" as value metric, not "users" (kind of a mix of what we do now)
|
||||
- Tiers as in TIERED PRICING but by packages with sets of features more than by usage
|
||||
- PROS: strong upgrade incentive, Compensate for delivery heavy (expensive to provide) features
|
||||
- CONS: Difficult to get right (hard to determine which features go in which package could get ugly), Leaves a bad taste / easier to feel resentful for the customer as they know they could be getting more
|
||||
|
||||
|
||||
FREEMIUM PRICING
|
||||
- Tier but with a free level to hook people in.
|
||||
- Pricing begins along a dimension of features, capacity or use case (you can use it to do this but not to do that; e.g. not free for commercial use)
|
||||
- PROS: foot in the door, viral marketing potential for word of mouth
|
||||
- CONS: Revenue killer; all revenue to support free needs to come from paid, increases churn, can devalue your core service
|
||||
|
||||
|
||||
|
||||
=-=-=-=-=-=-=-=-=-=-=-
|
||||
|
||||
OUR PRICING STRATEGY
|
||||
- What is our pricing strategy?
|
||||
- Example pricing strategies
|
||||
- Penetration Pricing: "land and expand" initial unsustainably low pricing for short to medium time period to grab a market then upsell later once you have a big base
|
||||
- Captive pricing: low initial price for "core" product but lots of add-ons that are required to really use it and those generate the revenue (inkjet printers)
|
||||
- Skimming pricing: start with an initial high price and slowly lower it over time (not sure this works in our market)
|
||||
- Prestige pricing: maintain a high price to convey prestige or have a high prestige tier (with a rounded price $500 not $499.99)
|
||||
- Free trial pricing: free month then charge to continue usage. Industry average is 50% adoption rate after free trial and 30 days is the average length
|
||||
|
||||
- I want it to be easy enough to sell that we never have to hard sell anyone on it or waste time convincing them, we don't want to have to do "sales" at all
|
||||
- Penetration or low pricing will give us growth and our expenses are not high so the more recurring revenue from as many sources as possible the better.
|
||||
- Rather see 500 low price customers than try to fight to keep 3 high price ones, far less risk
|
||||
- Possibly this means that we would take an average less profit on each sale
|
||||
- I'd like to see all options available for all users as much as possible. Maybe tiers based on scheduleable resources still? But that's hard to understand for users.
|
||||
|
||||
|
||||
|
||||
=-=-=-=-=-
|
||||
PRICING PSYCHOLOGY
|
||||
- Price Anchoring:
|
||||
- set an anchor price by highlighting the most expensive package first on the marketing page so that people judge the other prices by comparison to it
|
||||
- Maybe make the leftmost the most expensive or just otherwise highlight the most expensive so eyes are drawn to it first
|
||||
- Always start with the most expensive price first when marketing or discussing with a customer
|
||||
- Charm pricing
|
||||
- People heavily judge the leftmost digit in the price subconsciously so don't sell for $400 sell for $399; people see the 3 and mentally get stuck on it
|
||||
- Odd Even pricing
|
||||
- People might be getting used to the .99 thing so don't end in .99, use .98 or .23 or whatever instead just keep the first digit low
|
||||
- Product bundle pricing
|
||||
- bundle shit together; causes people to think outcomes rather than about the individual prices
|
||||
- Analysis paralysis
|
||||
- Do not offer too many options (research suggests 7 options plus or minus 2. So 5 or less is safest.)
|
||||
- The biggest, most successful SaaS companies have an average of 3.5 packages available on their pricing pages.
|
||||
- Center stage effect
|
||||
- use the center column of multiple prices as the "most popular", or one we want to sell the most of; people will choose it more often by default all things being equal.
|
||||
|
||||
|
||||
|
||||
|
||||
REQUIREMENTS
|
||||
|
||||
- JSON format
|
||||
- secured with hash signature
|
||||
- All licensed things are in a collection, not just the add-ons
|
||||
- scheduledusers, accounting etc.
|
||||
- Old versions should work with new licenses but not the reverse
|
||||
- Has an ID and source values like current keys, ID may become much more important in future
|
||||
- ALL TRIAL KEYS ARE REGISTERED TO SOMEONE NO SUCH THING AS TWO LICENSES IN CIRCULATION WITH THE SAME NAME (i.e. no more "Unregistered trial" meaning it's a trial, every user will have a specific name to test it out)
|
||||
- Rockfish issues the trial key upon first request to empty db
|
||||
- Need indicator that it's a trial key / evaluation key
|
||||
|
||||
- All licensed things should each have an expiry date possible to turn off that feature
|
||||
- In the case of options they just stop working
|
||||
- In the case of main user license everything stops working
|
||||
- MUST be able to support monthly billing cycle (automatic license installation or approval so user doesn't have to install key every month)
|
||||
- Rockfish should have a licensed yay or nay or new available route for RAVEN to check periodically and also users can trigger a force check
|
||||
- RAVEN checks periodically for new license to fetch in line with billing cycle or expiry cycle.
|
||||
- SAFE fallback if can't contact license server and allow a few misses before something kicks in, but not to allow people to use unlimited by blocking rockfish for example
|
||||
- Support both perpetual license with rental maintenance subscription and rental license
|
||||
- Need expiry date for maintenance so code can query if eligible for updates or support requests
|
||||
- Need expiry date for entire license for rental and trial scenarios
|
||||
- Support update restriction based on build date and license
|
||||
|
||||
- ?? what about the add-on's expiring at the same time as the sched users. currently the expiration dates differing is a hassle, should it be still supported??
|
||||
- ?? what if we are not licensing by scheduled users anymore but by something else
|
||||
- ?? need to analyze sales and determine the percentage of each level of license issued so I can group into tiers
|
||||
- Current (5/29/2018) active subscription licenses:
|
||||
- Single = 35
|
||||
- Up to 5 = 18
|
||||
- Up to 10 = 10
|
||||
- Up to 15 = 1
|
||||
- Up to 20 = 12
|
||||
- up to 50 = 0
|
||||
- Up to 999 = 0
|
||||
- QBI
|
||||
- QBOI
|
||||
- PTI
|
||||
- WBI
|
||||
- RI
|
||||
- MBI
|
||||
- OLI
|
||||
- OutLookScheduleExport
|
||||
|
||||
|
||||
Maybe what is needed is:
|
||||
- A single license expiry date no matter what is in the license
|
||||
- A single support and updates date no matter what is in the license
|
||||
- A single license "type" to support tiered pricing
|
||||
- Tiers: 1 user, up to 10, up to 100, custom?
|
||||
- Discussed with Joyce and she made a good case for one by one license sales as most customers are small and tiers may not help as they are in the 10 and under stage anyway
|
||||
so maybe no tiers at all, just licenses for "service technicians" or some more generic term and make a note early that a "service technician" is any scheduleable resource
|
||||
and then continue to use the term service technician afterwards because it's what the majority are scheduling and it is far easier to explain.
|
||||
For the rental market though we will really need to take into account bandwidth so in future there may be more types of things to track.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
PRODUCT LICENSE CHANGES FROM v7
|
||||
|
||||
|
||||
Still optional and purchased separately as a single Accounting interface option:
|
||||
QBOI, QBI, PTI
|
||||
|
||||
Possible idea: sold as "Accounting add on" and the user can use one of those of their choice and can switch which means 1 product instead of three which might make keys easier.
|
||||
Possible downside is that we can't track who uses what or what we sell how much of so puts a kink in marketing or planning for deprecation or where to spend
|
||||
effort, however there are other ways of addressing that.
|
||||
|
||||
|
||||
//INCLUDED ADD-ON's
|
||||
OLI - INCLUDED / TURNED INTO GENERIC FEATURE IMPORT / EXPORT CONTACTS and SCHEDULE TO ICAL
|
||||
OutLookScheduleExport - INCLUDED / TURNED INTO GENERIC SCHED EXPORT ICAL FORMAT
|
||||
Export to xls - Included with RAVEN
|
||||
QuickNotification - Included with RAVEN
|
||||
ImportExportDuplicate - Included with RAVEN
|
||||
RI/WBI/MBI - UNNECESSARY with RAVEN
|
||||
|
||||
|
||||
|
||||
=====================================
|
||||
|
||||
RESEARCH SOURCES
|
||||
- http://www.reprisesoftware.com/blog/2017/08/implement-a-recurring-revenue-license-model/
|
||||
- https://www.linkedin.com/pulse/20140819084845-458042-the-change-from-perpetual-to-subscription-based-software-licensing
|
||||
- http://software-monetization.tmcnet.com/articles/429528-long-but-rewarding-road-a-recurring-revenue-model.htm
|
||||
- How to calculate pricing in a service business: https://www.patriotsoftware.com/accounting/training/blog/how-pricing-services-strategies-models-formula/
|
||||
- SAAS pricing models: https://www.cobloom.com/blog/saas-pricing-models
|
||||
10
devdocs/specs/noteable-changes-from-v7.txt
Normal file
@@ -0,0 +1,10 @@
|
||||
NOTABLE CHANGES
|
||||
|
||||
|
||||
This is a list in no order of notable changes in RAVEN from v7.
|
||||
This is for the purpose of writing an overview and change doc for RAVEN release.
|
||||
|
||||
|
||||
REGIONS
|
||||
- Gone, now tags
|
||||
- Restrictions feature of regions gone, new roles might help for this scenario (limited roles)
|
||||
1
devdocs/specs/part-category-deprecated.txt
Normal file
@@ -0,0 +1 @@
|
||||
Replaced with tags case 3373
|
||||
1
devdocs/specs/plugin-dump.txt
Normal file
@@ -0,0 +1 @@
|
||||
Consolidating all import export to one plugin, see case 3503
|
||||
1
devdocs/specs/plugin-export-to-xls.txt
Normal file
@@ -0,0 +1 @@
|
||||
Consolidated all export import to one single feature see case 3503
|
||||
7
devdocs/specs/plugin-outlook-schedule.txt
Normal file
@@ -0,0 +1,7 @@
|
||||
FIRST OF ALL CHECK TO SEE WHO IS USING THIS OR ANY OTHER FRINGE PLUGINS
|
||||
THAT IS A CURRENT SUBSCRIBER
|
||||
|
||||
Consolidate all outlook sched related plugins to a single way of dealing with outlook.
|
||||
Also maybe different types of schedule like google calendar etc etc
|
||||
|
||||
Might even be related to case 3503 the dump utility as it could dump sched items to ical format or whatever
|
||||
2
devdocs/specs/priorities.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
Would like to replace with a tag, but they have colors.
|
||||
No cases regarding this that I can find in a quick search, but I suspect there was something somewhere, will update here when found
|
||||
33
devdocs/specs/todo-before-release.txt
Normal file
@@ -0,0 +1,33 @@
|
||||
TODO related to Release
|
||||
|
||||
|
||||
These are items that should be handled before release, a checklist of last minute stuff
|
||||
|
||||
ROCKFISH License key route stuff, make sure it's ready to make requests and handle customers in real world scenario
|
||||
RAVEN LICENSE KEY TRIAL
|
||||
- make sure the various workarounds to enable trial key fetching are disabled / removed, see License.RequestTrialKey and various others
|
||||
- Basically, if you remove the special CONST guid trial dbid value the rest of the code that needs to be changed should jump out due to compiler errors
|
||||
|
||||
DbUtil::DBIsEmpty()
|
||||
- Check that shit was updated to ensure all relevant tables are involved
|
||||
|
||||
|
||||
All dependencies in AyaNova.csproj project file should be locked down to a version release
|
||||
- for example "Npgsql.EntityFrameworkCore.PostgreSQL" package should be set to a determined last change version
|
||||
- Then test everything, confirm all ok, and no more changes to that file until determined after release it's safe to do so
|
||||
- It should be a snapshot in time.
|
||||
|
||||
figure out the tiniest possible distribution and which docker container from Microsoft it will work with
|
||||
- right now using sdk version on DO server because of fuckery, but ultimately it needs to be the minimal possible
|
||||
|
||||
Branch / tags / repository
|
||||
- Need to figure out how to branch and tag properly so can start on next version without affecting release version
|
||||
- In other words snapshot the release in the repo.
|
||||
- Currently it's not an issue, but maybe it should be done properly
|
||||
|
||||
Remove widget route from release (or hide it) but keep in debug
|
||||
|
||||
DOCUMENTATION TODO's
|
||||
|
||||
Do a search for "TODO" all caps in the docs, there are things that are on hold and need to be fleshed out.
|
||||
Do not release with the todo tag in there still!!!!
|
||||
246
devdocs/specs/todo-original-todo-doc.txt
Normal file
@@ -0,0 +1,246 @@
|
||||
# TODO original todo docs
|
||||
|
||||
This was my original todo doc which I've pared down to only what I want to work on a few steps ahead.
|
||||
Keeping this for reference as it has a lot of ideas about core services etc that will be useful
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
A NEW START
|
||||
|
||||
|
||||
|
||||
Get shit done:
|
||||
|
||||
Use an agile like process and start coding ASAP with the lowest level pre-requisite stuff.
|
||||
|
||||
I get stuff done faster when I see results and coding isn't bad to start early as long as it's done in the correct order and is iterated agilely
|
||||
|
||||
Code tests and then code against them, don't get crazy about unit testing every little thing, just what is necessary to validate
|
||||
Using an agile-like process re-iterate and continually add new stuff while testing.
|
||||
|
||||
|
||||
I want an installable, testable product ready to deploy even if it only has one feature and no front end yet.
|
||||
|
||||
Then starting from that base, add in all the common, shared, features first.
|
||||
|
||||
Ideally we have a real product that can be installed and run as soon as possible and then build onto that and iterate, adding feature by feature.
|
||||
|
||||
|
||||
1) Determine the first things to implement, ideally everything that isn't a business object requirement, bootstrap process, configuration etc.
|
||||
2) Implement the core with sysop stuff, generator equivalent, db, maybe not even any user accounts yet, just the framework
|
||||
- Want to be able to install as in production, start the server, configure, view the sysop stuff make an online account and test there etc
|
||||
3) Add a single simple business feature, maybe login and user creation, license etc
|
||||
- Code the front end for that feature, at this point need shell and etc.
|
||||
|
||||
4) Test all the above be sure about it, make sure it follows the guidelines for coding standards etc, then get into the actual features
|
||||
5) Code AyaNova business features
|
||||
- ** DO NOT WASTE TIME PLANNING AND DOCUMENTING EXISTING UNCHANGING FEATURES, LET AYANOVA SOURCE BE THE GUIDE FOR THAT **
|
||||
- Just document changes or new stuff only
|
||||
- This is where we get into the stuff below and the biz object cases etc
|
||||
- Code in order from the simplest most fundamental objects towards the most esoteric and complex multi object items
|
||||
- Save the most complex objects for last so as to save time as no doubt changes will come up in other coding
|
||||
- For example, there are no doubt objects that are mostly standalone and don't involve a lot of other objects, start there
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
******************************************************************************************************************
|
||||
******************************************************************************************************************
|
||||
OLD PLAN BELOW HERE KEPT FOR REF BUT NEW PLAN SUPERSEDES
|
||||
******************************************************************************************************************
|
||||
******************************************************************************************************************
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## FEATURE PLANNING / RE-ORG / AUDIT CASES
|
||||
|
||||
- PLANNING STEP 1: Go through all AyaNova 7.x, 8.x cases and any other places there is feature stuff
|
||||
- Move to RAVEN the ones that are worth keeping / looking into (snap judgements)
|
||||
- Set all cases moved to Raven to priority 3 indicating unprioritized
|
||||
- Stuff that will never be done for sure just close.
|
||||
- Stuff that won't be done in RAVEN but might be worth keeping until the release of RAVEN before closing drop priority down to 5
|
||||
- When done there should be no more cases above priority 5 in 7.x unless they will be fixed in 7.x
|
||||
|
||||
- PLANNING STEP 2: Examine RAVEN cases and re-prioritize and categorize them:
|
||||
- SET THE CATEGORY COLON DENOMINATED i.e. (CLIENT, REGION, WORKORDER:TASKS:CR:NEW, UI, SECURITY etc) by prepending the title with the category.
|
||||
- If it's completely new feature then the last tag should be ":NEW"
|
||||
- IF it's customer requested put a :CR tag in the title
|
||||
- UNPRIORITIZED
|
||||
- default for items until I categorize them
|
||||
- PRIORITY 3
|
||||
- MUST HAVE IN INITIAL RELEASE
|
||||
- PRIORITY 1 and 2
|
||||
- flag as highest priority 1 for work on first and 2 for work on secondly
|
||||
- SHOULD HAVE IN FUTURE RELEASE
|
||||
- PRIORITY 4 and 5
|
||||
- flag below highest as priority 4 or 5 depending on urgency
|
||||
|
||||
- PLANNING STEP 3 THE WORKORDER LAYOUT
|
||||
- This is tricky so needs a whole step
|
||||
- Determine new workorder layout and structure
|
||||
- Don't need exact features, rough is ok
|
||||
- Step 4 will get into the details of the workorder
|
||||
|
||||
******************************************************************************************************************
|
||||
******************************************************************************************************************
|
||||
NOT DONE BELOW HERE
|
||||
******************************************************************************************************************
|
||||
******************************************************************************************************************
|
||||
|
||||
|
||||
|
||||
|
||||
- PLANNING STEP 4 FEATURES DOCUMENTS
|
||||
- Read through my own notes in CODING-STANDARDS doc and REASEARCH.TXT and SOLUTIONS.TXT
|
||||
- VERY IMPORTANT stuff in there, a lot of sysops stuff and practical decision stuff
|
||||
- Review it all and update any related cases / make new cases as appropriate.
|
||||
|
||||
- Read through and triage Joyce's notes and docs for her v8 work.
|
||||
- I've re-written them all out sorted by last edited date
|
||||
- This will then be gone over more deeply as input into the later stage of feature development below
|
||||
|
||||
- Go through AyaNova and all its add-ons and put major feature areas into a list in features.txt
|
||||
- Then, go through all cases and all of Joyce's v8 docs for that item / area and create a document for each feature area
|
||||
- Fill in with the features and changes to that item in order of implementation
|
||||
- IMPORT: Important that I also document how existing data will be imported for this feature
|
||||
- If a completely new feature is determined then it needs to be in there as well (tags)
|
||||
|
||||
- Find common features that are shared with objects so they can be "Interfaced" and planned for
|
||||
- Tag handling, UI preferences and MRU's and user settings
|
||||
- Localization stuff
|
||||
- Notification
|
||||
- Common menu options that are shared by objects
|
||||
- Need a set of sections that deal with just this kind of thing as it will be coded FIRST.
|
||||
- Common UI widgets Document every common UI widget needed and where.
|
||||
- The RAVEN ui will be made up of a lot of tiny widgets that are re-used here and there, that's what I will be making so need to document that
|
||||
- Essentially everything in the UI will be a bunch of widgets housed in a bunch of modules housed in a shell
|
||||
- Identifying them all up front will save tremendous time in development
|
||||
- For example: in lots of places we need to show a filterable list of workorders tied to the object being viewed
|
||||
- Like on the client form you need to see a list of workorders for that client
|
||||
- On the unit form a list of workorders for that unit
|
||||
- So a WORKORDER LIST widget is one that will be re-used time and again so document that and it's features
|
||||
- Then it's just a matter of snapping it into any area it's needed
|
||||
- this will translate to a business object and probably a route in the api etc etc
|
||||
- Search widget and results display, that kind of thing
|
||||
- Identify the first object to code, ideally it should have the most common features and the least of its own features so I can focus on the shared stuff.
|
||||
|
||||
|
||||
- What I want to end up with is a set of documents one for each major feature
|
||||
- In each document I want to list what is kept, what is changed and what is new
|
||||
- Add points of what each feature currently does and needs
|
||||
- Then go through the cases and note new and changes relevant to that object
|
||||
- Consider and make decisions and rank as priority 1 (in first release) and priority 2 - subsequent release
|
||||
- iterate until there is at the end what I'm after
|
||||
|
||||
- This is the list I will work from to ensure everything gets done
|
||||
- I want each document's items in the best order of implementation as much as possible
|
||||
- THIS IS IMPORTANT: I don't want to code a feature then see that I should have done something different later
|
||||
- Then I can go through it while coding as my todo list basically
|
||||
- Ideally then can test as each object is ported, kind of like each object is a new feature in quick release cycle
|
||||
|
||||
|
||||
|
||||
- PLANNING STEP 5 USER INTERFACE
|
||||
- Design the user interface for each section above, I want to see visual design that I can work from instead of winging it.
|
||||
- ?? Need a design for at least two views, desktop / tablet and phone but need to research this a bit I guess
|
||||
- Doesn't need to be perfect, a rough sketch and placement is fine.
|
||||
- Need to design the widgets essentially
|
||||
- Need to account for each view depending on ROLE viewing it.
|
||||
- Find the commonalities for the roles so there are pre-defined levels of access for each widget that match roles
|
||||
- Like a Read only limited view, a full control view, a whatever view, each type named and replicate concept to all widgets
|
||||
- Widget should be aware of roles and what to allow for each of them
|
||||
|
||||
- STEP 6 CODING
|
||||
- Code the NFR / SHELL and fundamental / common features like tag handling etc.
|
||||
- Since a widget is kind of standalone ready, maybe a widget playground to test out widgets on a blank canvas without worrying about shell stuff at first.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- HOSTING planning
|
||||
- Just a light initial assessment, will not be super important at first, only interested in big picture stuff that might affect coding
|
||||
- What will we need for hosting?
|
||||
- What is the unit of software that each customer would use
|
||||
- I.E. do we have one container per customer and spin them up as required
|
||||
- Apply to design
|
||||
|
||||
|
||||
|
||||
|
||||
## INITIAL CODING: FRAMEWORK SKELETON ALL NON FUNCTIONAL REQUIREMENTS
|
||||
|
||||
- Make a list of all the things that it should do for front and back end
|
||||
- This is really the foundational skeleton of everything and is the most important step, if it takes a while so be it, it will save a fortune in time later
|
||||
- The design should be frozen once this skeleton satisfies NFR
|
||||
- Once this is in place then can just fill in functionally required code and objects from design
|
||||
- It's also a reference point for any other new projects going forward so keep a snapshot / Subversion tag that THIS IS IT!!
|
||||
- There should be no research or figuring out required after the skeleton is completed (ideally)
|
||||
- Build initial framework first with all the "non functional requirement" (everything but the actual business classes) specific features in it. Take a snapshot so it can be the basis for other projects in future.
|
||||
- All tools should be in place
|
||||
- Development tools
|
||||
- Testing tools
|
||||
- Once we have something to test look into something like: https://jenkins.io/solutions/docker/ containerized?
|
||||
- Unit
|
||||
- Integration
|
||||
- Load
|
||||
- Building tools
|
||||
- Once we have something to build look into something like: https://jenkins.io/solutions/docker/ containerized?
|
||||
- Releasing
|
||||
- versioning
|
||||
- updating
|
||||
- containerization
|
||||
- All database types
|
||||
- Installer / installation
|
||||
- Management interfaces or tools
|
||||
- BACK END API
|
||||
- Sample routes with two api versions (0 and 1.0)
|
||||
- ideally a small sample of code showing all the layers of code involved with all the NFR features desired like circuit breakers you name it
|
||||
- Users and Client for starters with a list of clients, a client entry and edit and delete
|
||||
|
||||
- dependency injection / loose coupling
|
||||
- https://joonasw.net/view/aspnet-core-di-deep-dive
|
||||
|
||||
|
||||
- Upgradeable
|
||||
- Versionable
|
||||
- Circuit breakers
|
||||
- Performance and other metrics and logging
|
||||
- All the NFR in the PROCESS and BEST PRACTICES section of my coding-standards doc
|
||||
- UI Skeleton to support this backend skeleton
|
||||
- All NFR that are UI level (a SPA web application)
|
||||
- UI concerns that may affect back end stuff should be in the skeleton
|
||||
- The guts of the UI presentation host whatever it's called without the actual specific bits to the app
|
||||
- Loading
|
||||
- Upgrading
|
||||
- Handling errors
|
||||
- talking to the api
|
||||
|
||||
- TESTING
|
||||
-
|
||||
- Smoke test first in a limited test VM that simulates a droplet of 512mb ram, 1cpu, docker container with db etc
|
||||
- Test in a DO droplet under load with lots of data and see what memory and cpu and disk resources are required and how it performs
|
||||
|
||||
|
||||
|
||||
|
||||
## FR CODING
|
||||
- After the above move on to functional requirements coding
|
||||
- Test and deploy daily at least
|
||||
|
||||
|
||||
|
After Width: | Height: | Size: 392 KiB |
2
devdocs/specs/unit-of-measures.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
DEPRECATED?
|
||||
Still not decided yet but leaning heavily towards ditching it, see case 3433 and 2038
|
||||
3
devdocs/specs/unit-service-type.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
Deprecated for tag?
|
||||
Didn't actually check but it seems like a likely candidate.
|
||||
See case 3373
|
||||
2
devdocs/specs/user-certifications.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
Deprecated maybe?
|
||||
See case 3440, 1138
|
||||
2
devdocs/specs/user-skills.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
Deprecated maybe?
|
||||
See case 3440, 1138
|
||||
1
devdocs/specs/user-wiki.txt
Normal file
@@ -0,0 +1 @@
|
||||
WTF do we need this for?
|
||||
2
devdocs/specs/workorder-categories.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
Deprecated to tags (verify this is possible)?
|
||||
See case 3373
|
||||
3
devdocs/specs/workorder-item-type.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
Deprecated to tags?
|
||||
See case 3373
|
||||
Need to confirm it will work, types don't appear to do anything and are documented as for filtering and reporting and sorting so that's a tag
|
||||
BIN
devdocs/specs/workorder.odt
Normal file
345
devdocs/todo.txt
Normal file
@@ -0,0 +1,345 @@
|
||||
# TODO (J.F.C. - Just fucking code it already)
|
||||
|
||||
Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOiIxNTI4MjEyNjI5IiwiZXhwIjoiMTUzMDgwNDYyOSIsImlzcyI6IkF5YU5vdmEiLCJpZCI6IjEifQ.Kst0iTQ2hLCKwEq6vkqmlSHrrmNqksxFkMM0PIRiyjA
|
||||
|
||||
|
||||
## IMMEDIATE ITEMS
|
||||
|
||||
CHOPPY DAY WORK
|
||||
+++++++++++++++
|
||||
|
||||
Move to new server
|
||||
- PLAN: Move off old server bit by bit, starting with repository, then
|
||||
- BACKUP
|
||||
- https://gztw1.nyc3.digitaloceanspaces.com
|
||||
- Backups would be to DO Spaces
|
||||
- New york is the only north american storage location but that's at least offsite
|
||||
- 5 bucks a month for 250gb storage and 1tb outbound traffic
|
||||
- Has a two month free trial option from the main do page, not within our droplets ui
|
||||
- https://www.digitalocean.com/community/tutorials/how-to-automate-backups-digitalocean-spaces
|
||||
- GIT SERVER
|
||||
- https://docs.gitea.io/en-us/install-with-docker/
|
||||
- First get a private git repo working on the D.O.
|
||||
- Then move all new dev to the private git server instead of the SEA repo
|
||||
|
||||
CODING WORK
|
||||
+++++++++++
|
||||
|
||||
Might be time to order all this to the best effectiveness if it isn't already.
|
||||
|
||||
Error messages / Numbers
|
||||
- All server error codes start with E1000, all API error codes start with E2000
|
||||
- Look for English text in all the messages so far and see if can be localized even crudely by google translate and do so
|
||||
- Make sure error numbers have a consistent system and don't conflict, I think there are two sets of error numbers, there should only be one
|
||||
- Make sure Every error has a number and that is documented in the manual
|
||||
- Locale keys for error numbers?? i.e. E1000, "blah blah error 1000"
|
||||
|
||||
Cleanup and sticking to the following code convention:
|
||||
All names are PascalCaseOnly with the following two exceptions:
|
||||
- function parameter names are ALWAYS camelCased
|
||||
- CONST values are ALL_CAPS with underlines between for spaces
|
||||
|
||||
Created/Changed/Modifier/ Change / Audit log
|
||||
- Flesh out and implement fully
|
||||
- See cases, specs doc this has already been planned quite a bit
|
||||
- Modify existing routes / objects to use the log
|
||||
- Tests
|
||||
- Do I add more fields to the objects to cover same as v7 (currently only created is there in widget and others) or do I make use of the changelog table for that shit??
|
||||
|
||||
Changes needed to routes??
|
||||
- http://www.talkingdotnet.com/actionresult-t-asp-net-core-2-1/
|
||||
|
||||
Overall plan for now: anything standing in the way of making the initial client shell UI needs to be done first, everything else can wait
|
||||
- Localized text
|
||||
- Search and search text indexing
|
||||
-
|
||||
|
||||
Ensure all modern best practice security is properly enabled on helloayanova.com so testing is valid
|
||||
- https://en.wikipedia.org/wiki/HTTP_Strict_Transport_Security#Deployment_best_practices
|
||||
|
||||
CLIENT SHELL
|
||||
|
||||
Once I can make the client I need to get into that and make the shell and initial interface with enough stuff to do basic testing initially
|
||||
- Make sure to see the vue.js stuff in tools and below and contemplate it fully before committing to it
|
||||
- VUE was chosen some time ago and there are likely other things out now
|
||||
- Look and layout, graphics, logo, anything that is shell only
|
||||
- Menu system
|
||||
- Help link
|
||||
- search
|
||||
- Login , logout
|
||||
- License
|
||||
- Security / rights
|
||||
- See localized text / change locale
|
||||
|
||||
later
|
||||
- Widget CRUD and lists
|
||||
- All input controls and date localization etc etc
|
||||
|
||||
|
||||
|
||||
### ALL ITEMS
|
||||
|
||||
|
||||
- LOCALIZED TEXT
|
||||
- Localized text keys would be nice if they are understandable as is for API direct users so you don't need to be in the client to understand
|
||||
what's happening
|
||||
- Need a locale-independent locale so that server errors without a corresponding user are localized to default English
|
||||
- Also maybe a locale can be chosen at the server for error messages since we'll go by code numbers anyway.
|
||||
- Starting to get to the point where I'll need this, i.e. error messages and logs that are part of core ops but need to be displayed in the UI
|
||||
- Need to go through the api and find all the plain text messages returned and convert to locale text keys
|
||||
- Ensure every error message has an error number of one kind or another and that they are not conflicted and easy to sort out if coming from server or api or etc
|
||||
- Need to suck out our paid for translations and convert them into new locale text format
|
||||
- DataDump??
|
||||
|
||||
|
||||
|
||||
- SEARCH TEXT
|
||||
- See spec docs
|
||||
|
||||
- TECH SUPPORT
|
||||
- Investigate how I can look at a customers DB with RAVEN
|
||||
- Data masking for dumps is a start
|
||||
- special "tech support" dump with masked customer information??
|
||||
- Customer has a "key" that they can see which customer is the substituted masked one so we don't know the customer name but they can reference it themselves
|
||||
- What about live looking at data through some feature?
|
||||
- What would I need to look at or what information would I need?
|
||||
- Be able to run a query directly and view results??
|
||||
- Be able to run a query provided
|
||||
- As a fix might need to enable customer to run a provided query.
|
||||
- Be able to view all the meta information about the postgres instance
|
||||
- Collation, sort order, languages, anything the user can set that could fuck up RAVEN
|
||||
|
||||
|
||||
- MODIFICATION / CHANGE LOG (see case 79)
|
||||
|
||||
- Visible ID number generator case 3544
|
||||
|
||||
- CUSTOM FIELDS (case 3426)
|
||||
|
||||
|
||||
|
||||
- Notification / generator / event of interest stuff (case 3491) BIG ONE
|
||||
- Need interface, code for triggering notifications in biz objects ITriggerable :)
|
||||
- Would it be more efficient to just process all notifications into the modification log regardless of subscribers
|
||||
|
||||
- CHILD objects need to point to their parent and be readable in code for searching and for opening objects based on child object
|
||||
- so all child objects need a typeandid of the immediate parent
|
||||
- Not sure where to document this so putting it here for reference
|
||||
- Required for opening a search result of a descendant that is not directly openable
|
||||
- Need parent AyaType as an ENUM ATTRIBUTE in the AyaType table for easy traversal
|
||||
|
||||
|
||||
- Import V7
|
||||
- Tags - any type that is moving to tag can be coded now
|
||||
|
||||
- TESTING
|
||||
- Longevity test on the DO server I can have up and running see core-testing.txt doc
|
||||
|
||||
CLIENT
|
||||
|
||||
- WHEN HAVE CLIENT - Localization (see core-localization.md)
|
||||
- Time zone stuff (case 1912 related)
|
||||
- WHEN HAVE CLIENT - Layout / Form user settings
|
||||
- WHEN HAVE CLIENT - Default form filling settings handling (case 3485)
|
||||
- WHEN HAVE CLIENT - Push notification to client
|
||||
- PUSH / POLL notification: determine and implement a system that can send notifications to client for things like
|
||||
- change of localized text (invalidate cache)
|
||||
- Server shutting down (log out asap)
|
||||
- business object notifications (new workorder, status change etc)
|
||||
- WHEN HAVE CLIENT Report route for widget
|
||||
- WHEN HAVE CLIENT - test my PickList, is it sufficient? (pageable, alpha pageable (A-D, E-G kind of thing?))
|
||||
- Case 1692
|
||||
- search by tags plus text plus maybe pageable or...???
|
||||
- WHEN HAVE CLIENT - ACTION / UI WIDGETS case 3460, 1729 UI as a collection of widgets stuff
|
||||
|
||||
|
||||
- When widget is completely done, go over it and see if anything can be made easier or better before proceeding
|
||||
- Generate seed data for tags
|
||||
- Better to do this when the above core items are done as it touches on them
|
||||
- Time zone
|
||||
- This is not specced anywhere, but here, not sure where to put it at the moment, hopefully by the time I get here I will know (global settings? User settings?)
|
||||
- Do not rely on the server's time zone setting, for example a docker container will be utc even if the server hosting it is pacific time
|
||||
- Instead, use UTC for everything and have configurable value for timezone offset
|
||||
|
||||
### NFR
|
||||
|
||||
- UPDATE SWASHBUCKLE / SWAGGER to support testing file upload if not too onerous
|
||||
- https://github.com/domaindrivendev/Swashbuckle/issues/280
|
||||
- http://www.talkingdotnet.com/how-to-upload-file-via-swagger-in-asp-net-core-web-api/
|
||||
|
||||
- IMPORT / EXPORT
|
||||
- biz object should import from v7, make a dummy import for widgets from something I've already exported in v7 (units? something with at least a name)
|
||||
- Make a route for import to upload an import file? Then it runs the import via the biz objects and the correct ordering?
|
||||
- OPS Functionality?? (maybe just biz admin only since it's dealing with actual biz data)
|
||||
- OPS can import ops related stuff??(notification email server settings etc)
|
||||
|
||||
- BACKUP and RESTORE and COPY automatically to storage offsite
|
||||
- Backup and restore data (widget, users etc)
|
||||
- Close AyaNova server, erase db(optionally?? maybe user wants to combine two separate db's), restore the data
|
||||
- See Discourse, they have some kind of AWS thing
|
||||
- Also maybe this is handy: http://www.talkingdotnet.com/webhooks-with-asp-net-core-dropbox-and-github/
|
||||
- Download backup, upload backup file
|
||||
- FTP automatically?
|
||||
|
||||
|
||||
- Need api speed test route (to independently of any particular object know how fast the connection is, for choosing a host site and troubleshooting)
|
||||
- Some kind of static test list that is perfectly reproducible on demand
|
||||
- Maybe a set routine of items to generate and return but in a way to disambiguate between slow server and slow connection
|
||||
- don't re-invent the wheel
|
||||
- What to test:
|
||||
- compute performance
|
||||
- DB performance
|
||||
- throughput? Speed of network
|
||||
|
||||
- Need some way to know if AyaNova is taking longer than it should to process requests so it can be an alert of some kind
|
||||
- research how to time api avg running total or something, graph it for ops
|
||||
- keep data by class of operation or tag it somehow
|
||||
- Don't want it to actually slow performance
|
||||
- Maybe have a benchmark time for various ops gathered during debugging tests, then hard code in that benchmark and if it takes longer then it logs it
|
||||
- http://www.neekgreen.com/2017/11/06/easy-way-measure-execution-time-aspnetcore-action-method/
|
||||
- https://weblogs.asp.net/jeff/asp-net-core-middleware-to-measure-request-processing-time
|
||||
|
||||
|
||||
- SSL / TLS
|
||||
- Need to look into how to support this
|
||||
- Look into how the 2.1 dotnet will work with ssl so I do something relatively compatible
|
||||
|
||||
|
||||
- REPORTING
|
||||
- NOTE TO SELF: Don't report off Biz objects, make report specific objects. Better to have a reportclient list object and a selection client list object and etc than just a single client list doing duty as a selection box filler and a reporting object
|
||||
And also biz object interfaces ideas:
|
||||
ITaggable, ICustomFields, ISearchable, IExportable,IBizAction, IReportable (with sub interfaces for paging, format, report name and biz object for single and list etc,report stuff), ILocaleFields?, Etc
|
||||
You're welcome!🤘😎
|
||||
|
||||
|
||||
- CLIENT / UI DEVELOPMENT
|
||||
- CLIENT UI "WIDGETS" ("COMPONENTS")
|
||||
- Have UI testing scripts for developing UI. Scenario and then I can manually walk through it and see how ui responds to iterate from rough skeletal UI.
|
||||
- Make a script for top X scenarios in the work day if each role. That way can try early rough designs with neutral expectations and reiterate until adequate.
|
||||
- This way I won't design out of my ass without good input to riff off of.
|
||||
- Orient express is some good shit for the shabs!
|
||||
|
||||
|
||||
- Client: Start initial front end vue.js shell
|
||||
- Need way to shut down clients gracefully (added value in api return? Polling [can't recall what the decision was in polling])
|
||||
|
||||
- Implement unlicensed server mode in client
|
||||
- Request trial key from client, server fetches and installs
|
||||
|
||||
- Implement trial mode in client
|
||||
- Seed data, erase db etc
|
||||
|
||||
|
||||
- AFTER APRIL 1st 2018 - Dotnet 2.1 changes I must look into:
|
||||
- will be rtm this summer-ish
|
||||
- some swagger and webapi affecting changes
|
||||
- efcore group by and lazy loading thing, might be relevant, not sure
|
||||
- HTTPS by default
|
||||
- Look into it, see if something will be so huge that I should use the beta now for dev.
|
||||
|
||||
|
||||
|
||||
BUNDLING
|
||||
- ONCE there is any front end code worthwhile then - Automatic build process Bundling and minification
|
||||
- Parcel is coming on strong and requires supposedly zero configuration: https://parceljs.org/getting_started.html
|
||||
- READ THIS: https://docs.microsoft.com/en-us/aspnet/core/client-side/using-gulp
|
||||
- THEN SET IT UP
|
||||
- Need automatic file copy script or whatever to copy docs to wwwroot folder somewhere so it can be served by the ayanova server
|
||||
- Also need to package front end stuff for deployment as well with versioning etc, not webpack but along those lines
|
||||
|
||||
|
||||
|
||||
|
||||
- Think about hostname being included with license, maybe a requirement?
|
||||
- Localhost only or domain?
|
||||
- Or would it be too much hassle with non-domain sites
|
||||
- Fail2ban? Will we need that kind of thing incorporated into AyaNova?
|
||||
- See again how it works and then look into application level ideas for that or what is smart for hardening, throttling etc
|
||||
|
||||
- 2FA Two factor authentication
|
||||
- How hard is this to support in AyaNova?
|
||||
- What about apps like Authy?
|
||||
- It might be important to enable this for ops and biz accounts? Or at least be an option?
|
||||
|
||||
- LETS ENCRYPT
|
||||
- https://www.humankode.com/ssl/how-to-set-up-free-ssl-certificates-from-lets-encrypt-using-docker-and-nginx
|
||||
- https://weblog.west-wind.com/posts/2017/Sep/09/Configuring-LetsEncrypt-for-ASPNET-Core-and-IIS
|
||||
- https://stackoverflow.com/questions/48272373/how-do-i-get-letsencrypt-working-in-asp-net-core-razor-pages
|
||||
- NGINX: https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-16-04
|
||||
- Review again if need NGINX in front of kestrel still and if so then go this route
|
||||
|
||||
|
||||
|
||||
|
||||
- DO WE NEED TO BE ABLE TO admin db from within raven ops route even if can't connect to db?
|
||||
- Don't want users to have to use a db admin tool for anything, so should have ability to do whatever is necessary from ops route with db
|
||||
- REQUIRED OPS: See if db exists
|
||||
|
||||
|
||||
DEPLOYMENT AND TESTING
|
||||
- DOCKER As soon as viable make an automatic build to a docker image for testing and deployment
|
||||
- https://docs.microsoft.com/en-us/dotnet/core/docker/building-net-docker-images
|
||||
- remote server online testing
|
||||
- Better product will come from running it as it will be used as early and often as possible
|
||||
- Look into renting the cheapest server on linode or digital ocean for dev testing
|
||||
- set it up to pull the latest from repo so it automatically updates (or a docker image maybe)
|
||||
- possibly set up integration test that goes off the remote server
|
||||
- WINDOWS Automatic installer for testing
|
||||
- Need a windows test bed and regular testing on it to confirm multiplatform interoperability
|
||||
- Maybe a windows installer or maybe a docker image
|
||||
|
||||
- Integration test that can be pointed at any location to run a series of tests
|
||||
|
||||
|
||||
|
||||
MANUAL
|
||||
- Add how to use swagger UI and authentication
|
||||
|
||||
|
||||
BOOTSTRAPPING AUTHENTICATION
|
||||
|
||||
- Manager account can only login from localhost? - HMM...think on it
|
||||
|
||||
- What if can only create new users if manager account is changed from default credentials?
|
||||
- that way you start your setup with one account, change it and then it's safe to do remote work
|
||||
|
||||
|
||||
- by default manager account is only one with rights to configure server or user accounts.
|
||||
- Doesn't have any rights to business config, only server config and CRUD user accounts
|
||||
- So at least one admin user needs to be created locally before it can be used remotely to set up users
|
||||
- manager account cannot be changed in any way, so always has default password and login
|
||||
- JWT token check must check if local when it's the manager account user id 1
|
||||
- This is so a user can't copy the creds from browser and use them remotely
|
||||
- Test that shit from host
|
||||
- What if can't run a browser in host for some reason???
|
||||
- need an override that does allow remote manager account
|
||||
|
||||
- devise a way to bootstrap with no user accounts and a way to reset back to that
|
||||
|
||||
|
||||
|
||||
|
||||
MAKE MVP
|
||||
|
||||
- Has the following features:
|
||||
Alpha-0
|
||||
- installer for windows and docker container
|
||||
- VUE.js Front end that supports at minimum a login / logout and empty shell
|
||||
- Shows server and client versions (about)
|
||||
- https://vuejs.github.io/vetur/
|
||||
- swagger docs and way to view them via the api
|
||||
- User manual docs
|
||||
- See the tools.txt section search for vue
|
||||
|
||||
Alpha-1
|
||||
- Can do some minimal config like seed data, erase db etc
|
||||
- Ops interface showing status and can view log etc
|
||||
|
||||
etc
|
||||
|
||||
|
||||
|
||||
LONG TERM::::::
|
||||
|
||||
## MVP and iterate
|
||||
|
||||
304
devdocs/tools.txt
Normal file
@@ -0,0 +1,304 @@
|
||||
MSBUILD reference for csproj file
|
||||
https://docs.microsoft.com/en-us/visualstudio/msbuild/msbuild#BKMK_ProjectFile
|
||||
|
||||
|
||||
Quickly generate large files in windows: http://tweaks.com/windows/62755/quickly-generate-large-test-files-in-windows/
|
||||
|
||||
Never download another 100mb test file or waste time searching for a large file. Sometimes you need a large file fast to test data transfers or disk performance. Windows includes a utility that allows you to quickly generate a file of any size instantly.
|
||||
|
||||
Open an administrative level command prompt.
|
||||
|
||||
Run the following command:
|
||||
|
||||
fsutil file createnew <file> <size in bytes>
|
||||
|
||||
For example, this command will create a 1GB file called 1gb.test on my desktop:
|
||||
|
||||
fsutil file createnew c:\users\steve\desktop\1gb.test 1073741824
|
||||
|
||||
The key is to input the size of the file in bytes so here are some common file sizes to save you from math:
|
||||
|
||||
1 MB = 1048576 bytes
|
||||
|
||||
100 MB = 104857600 bytes
|
||||
|
||||
1 GB = 1073741824 bytes
|
||||
|
||||
10 GB = 10737418240 bytes
|
||||
|
||||
100 GB =107374182400 bytes
|
||||
|
||||
1 TB = 1099511627776 bytes
|
||||
|
||||
10 TB =10995116277760 bytes
|
||||
|
||||
=-=-=-=-=-=-=-=-=-=-
|
||||
|
||||
|
||||
After a reboot of dev machine the containers are stopped and need to be restarted on reboot with this command:
|
||||
docker start dock-pg10 dock-pgadmin
|
||||
|
||||
**USE PGADMIN**
|
||||
Browse to localhost 5050
|
||||
|
||||
Can view the status of all containers with
|
||||
docker ps -a
|
||||
|
||||
|
||||
|
||||
## FRONT END
|
||||
|
||||
** Best stuff for JS and development of 2017, lots of useful info here:
|
||||
https://risingstars.js.org/2017/en/
|
||||
|
||||
**BROWSER CLIENT LIBRARIES**
|
||||
- JQuery
|
||||
- Lodash
|
||||
- UI FRAMEWORK - VUE.JS ?? (maybe a framework or maybe vanilla JS)
|
||||
- https://github.com/mattkol/Chromely (New alternative to Electron and supports VUE.JS)
|
||||
- https://vuejs.github.io/vetur/
|
||||
- Good discussion here about general UI and also what is "infuriating" in web material design when you just want to get on with work
|
||||
- Got to be careful not to make it too good looking at the expense of performance
|
||||
- that being said it has other good discussion on stuff in general
|
||||
|
||||
|
||||
## BUNDLING AND MINIFICATION
|
||||
- https://docs.microsoft.com/en-us/aspnet/core/client-side/bundling-and-minification?tabs=visual-studio%2Caspnetcore2x
|
||||
- Gulp seems best for me: https://docs.microsoft.com/en-us/aspnet/core/client-side/using-gulp
|
||||
|
||||
## DEPLOYMENT
|
||||
|
||||
### DEPLOY TO DIGITAL OCEAN TEST SERVER
|
||||
|
||||
- PUBLISH:
|
||||
- Make sure updated version number first!!
|
||||
- Need to be in C:\data\code\raven\server\AyaNova\
|
||||
- Then run command:
|
||||
- dotnet publish -o C:\data\code\raven\dist\docker\linux-x64\ayanovadocker\files\ -c Release
|
||||
- COPY
|
||||
- Use filezilla to copy files that are new up to server
|
||||
- Copy to "/home/john/xfer/ayanovadocker/files"
|
||||
- These two files (and any other changes that are relevant)
|
||||
- C:\data\code\raven\dist\docker\linux-x64\ayanovadocker\files\AyaNova.dll
|
||||
- C:\data\code\raven\dist\docker\linux-x64\ayanovadocker\files\AyaNova.pdb
|
||||
- CONSOLE TO SERVER VIA PUTTY
|
||||
- Bring down current containers:
|
||||
- navigate to ~/xfer folder
|
||||
- execute sudo docker-compose down
|
||||
|
||||
- Build new image forcing it to update as it sometimes doesn't
|
||||
- sudo docker-compose build --force-rm --pull
|
||||
|
||||
- Run new image
|
||||
- sudo docker-compose up -d
|
||||
|
||||
- Restart NGINX container as it seems to lose its mind when the AyaNova container is restarted (502 BAD GATEWAY error)
|
||||
- from /docker/letsencrypt-docker-nginx/src/production run sudo docker-compose up -d
|
||||
- Or just use the restartnginx.sh script in xfer at the server
|
||||
|
||||
- Test
|
||||
- If 502 BAD GATEWAY then AyaNova server is not up so the NGINX config bombs because it's proxying to it.
|
||||
- Actually, it just happened and what needs to be done is AyaNova container needs to be running BEFORE nginx container or it seems to get stuck
|
||||
- Check logs with sudo docker logs [containerID] to find out what happened
|
||||
- Or in some cases (once) Digital Ocean fucked up something
|
||||
|
||||
|
||||
|
||||
|
||||
### Publish command line:
|
||||
|
||||
Windows 64 bit:
|
||||
dotnet publish -o /home/john/Documents/raven/dist/server/win-x64/ -r win-x64 -c Release --self-contained
|
||||
dotnet publish -o C:\data\code\raven\dist\server\win-x64\ -r win-x64 -c Release --self-contained
|
||||
|
||||
|
||||
Linux 64 bit:
|
||||
|
||||
Normal build without all the .net files (not self contained)
|
||||
This is appropriate for docker based distribution since another image will contain the .net runtime:
|
||||
|
||||
#### DEFAULT BUILD COMMAND
|
||||
dotnet publish -o C:\data\code\raven\dist\docker\linux-x64\ayanovadocker\files\ -c Release
|
||||
|
||||
(linux)
|
||||
dotnet publish -o ~/Documents/raven/dist/server/linux-x64/ayanovadocker/files/ -c Release
|
||||
|
||||
|
||||
Self contained (this is appropriate for non containerized distribution, but still requires some Linux native requirements - see below):
|
||||
dotnet publish -o C:\data\code\raven\dist\server\linux-x64\ -r linux-x64 -c Release --self-contained
|
||||
dotnet publish -o ~/Documents/raven/dist/server/linux-x64/ -r linux-x64 -c Release --self-contained
|
||||
|
||||
Needed to change permissions on the AyaNova file to make it executable and also it requires these pre-requisites and probably more:
|
||||
apt-get install libunwind8
|
||||
apt-get install libcurl3
|
||||
|
||||
//.net core 2.x linux native requirements
|
||||
https://docs.microsoft.com/en-us/dotnet/core/linux-prerequisites?tabs=netcore2x
|
||||
|
||||
|
||||
|
||||
Windows 32 bit:
|
||||
dotnet publish -o /home/john/Documents/raven/dist/server/win-x86/ -r win-x86 -c Release --self-contained
|
||||
|
||||
Self contained Windows 10 x64:
|
||||
dotnet publish -o /home/john/Documents/raven/dist/server/win10x64/ -r win10-x64 -c Release --self-contained
|
||||
|
||||
PORTABLE RID's:
|
||||
win-x64
|
||||
win-x86
|
||||
linux-x64
|
||||
|
||||
//D.O. Linux
|
||||
ubuntu.16.04-x64 //<--- ends up being the same size as portable linux 64 so not really necessary
|
||||
|
||||
|
||||
- https://docs.microsoft.com/en-us/dotnet/core/deploying/index
|
||||
- https://docs.microsoft.com/en-us/aspnet/core/host-and-deploy/index?tabs=aspnetcore2x
|
||||
- https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-publish?tabs=netcore2x
|
||||
- https://docs.microsoft.com/en-us/dotnet/core/rid-catalog
|
||||
|
||||
|
||||
### DOCKER
|
||||
|
||||
- Build containers:
|
||||
- john@debian9John:~/Documents/raven/dist/docker/linux-x64$ docker-compose build
|
||||
- Run it:
|
||||
- :~/Documents/raven/dist/docker/linux-x64$ docker-compose up -d
|
||||
- Build it in prep for running it:
|
||||
- dotnet publish -o C:\data\code\raven\dist\docker\linux-x64\ayanovadocker\files\ -c Release
|
||||
- john@debian9John:~/Documents/raven/server/AyaNova$ dotnet publish -o ~/Documents/raven/dist/docker/linux-x64/ayanovadocker/files -c Release
|
||||
|
||||
|
||||
- OPTIONAL SAVING IMAGES (probably will never use this again but keeping for the info)
|
||||
- Save image:
|
||||
- docker image save -o .\image\ay-alpha2 gztw/ayanova
|
||||
- Note: if you use a tag name or repo name it's preserved but if you use an image id it loses the tags
|
||||
- Not compressed, can be compressed about 60% smaller
|
||||
- Load image:
|
||||
- docker image load -i saved_image_file_name_here
|
||||
|
||||
|
||||
####
|
||||
- Running docker at our D.O. server
|
||||
- run AyaNova container FIRST sudo docker-compose up -d at ~/xfer/
|
||||
- To update:
|
||||
- run a publish command to publish to my local dist/linux-x64/ayanovadocker/files
|
||||
- Then use Filezilla to copy up to the server at ~/xfer/ayanovadocker/files
|
||||
- Optionally, update the ~/xfer/docker-compose to set a new version number for the image name ("alpha-5" etc or maybe remove the name in future)
|
||||
- If necessary do a docker-compose build to rebuild
|
||||
- run Nginx server:
|
||||
- from /docker/letsencrypt-docker-nginx/src/production run sudo docker-compose up -d
|
||||
- If necessary can switch to root with command: sudo su -
|
||||
- documented here: https://www.humankode.com/ssl/how-to-set-up-free-ssl-certificates-from-lets-encrypt-using-docker-and-nginx
|
||||
|
||||
|
||||
## TESTING
|
||||
- DATA SEEDING: https://github.com/bchavez/Bogus (a port of faker.js)
|
||||
|
||||
|
||||
**DEVELOPMENT TOOLS**
|
||||
- TASK RUNNER - npm scripts
|
||||
- CODE CHECK (linter) ??
|
||||
- TEST unit / integration: Mocha
|
||||
- Subversion
|
||||
|
||||
|
||||
### DOCKER NGINX LETS ENCRYPT CERTBOT
|
||||
- https://www.humankode.com/ssl/how-to-set-up-free-ssl-certificates-from-lets-encrypt-using-docker-and-nginx
|
||||
- https://github.com/humankode/letsencrypt-docker-nginx/blob/master/src/production/production.conf
|
||||
|
||||
|
||||
|
||||
INITIALLY FETCH CERTIFICATES (MUST START LETSENCRYPT NGINX CONTAINER FIRST AND STOP ALL OTHERS)
|
||||
|
||||
#### STAGING
|
||||
sudo docker run -it --rm \
|
||||
-v /docker-volumes/etc/letsencrypt:/etc/letsencrypt \
|
||||
-v /docker-volumes/var/lib/letsencrypt:/var/lib/letsencrypt \
|
||||
-v /docker/letsencrypt-docker-nginx/src/letsencrypt/letsencrypt-site:/data/letsencrypt \
|
||||
-v "/docker-volumes/var/log/letsencrypt:/var/log/letsencrypt" \
|
||||
certbot/certbot \
|
||||
certonly --webroot \
|
||||
--email support@ayanova.com --agree-tos --no-eff-email \
|
||||
--webroot-path=/data/letsencrypt \
|
||||
--staging \
|
||||
-d helloayanova.com -d www.helloayanova.com -d v8.helloayanova.com -d test.helloayanova.com
|
||||
|
||||
#### PRODUCTION
|
||||
sudo docker run -it --rm \
|
||||
-v /docker-volumes/etc/letsencrypt:/etc/letsencrypt \
|
||||
-v /docker-volumes/var/lib/letsencrypt:/var/lib/letsencrypt \
|
||||
-v /docker/letsencrypt-docker-nginx/src/letsencrypt/letsencrypt-site:/data/letsencrypt \
|
||||
-v "/docker-volumes/var/log/letsencrypt:/var/log/letsencrypt" \
|
||||
certbot/certbot \
|
||||
certonly --webroot \
|
||||
--email support@ayanova.com --agree-tos --no-eff-email \
|
||||
--webroot-path=/data/letsencrypt \
|
||||
-d helloayanova.com -d www.helloayanova.com -d v8.helloayanova.com -d test.helloayanova.com
|
||||
|
||||
|
||||
#### SAMPLE OUTPUT:
|
||||
john@ubuntu-s-1vcpu-1gb-sfo2-01:/docker/letsencrypt-docker-nginx/src/letsencrypt$ sudo docker run -it --rm \
|
||||
> -v /docker-volumes/etc/letsencrypt:/etc/letsencrypt \
|
||||
> -v /docker-volumes/var/lib/letsencrypt:/var/lib/letsencrypt \
|
||||
> -v /docker/letsencrypt-docker-nginx/src/letsencrypt/letsencrypt-site:/data/letsencrypt \
|
||||
> -v "/docker-volumes/var/log/letsencrypt:/var/log/letsencrypt" \
|
||||
> certbot/certbot \
|
||||
> certonly --webroot \
|
||||
> --email support@ayanova.com --agree-tos --no-eff-email \
|
||||
> --webroot-path=/data/letsencrypt \
|
||||
> -d helloayanova.com -d www.helloayanova.com
|
||||
Saving debug log to /var/log/letsencrypt/letsencrypt.log
|
||||
Plugins selected: Authenticator webroot, Installer None
|
||||
Obtaining a new certificate
|
||||
Performing the following challenges:
|
||||
http-01 challenge for helloayanova.com
|
||||
http-01 challenge for www.helloayanova.com
|
||||
Using the webroot path /data/letsencrypt for all unmatched domains.
|
||||
Waiting for verification...
|
||||
Cleaning up challenges
|
||||
|
||||
IMPORTANT NOTES:
|
||||
- Congratulations! Your certificate and chain have been saved at:
|
||||
/etc/letsencrypt/live/helloayanova.com/fullchain.pem
|
||||
Your key file has been saved at:
|
||||
/etc/letsencrypt/live/helloayanova.com/privkey.pem
|
||||
Your cert will expire on 2018-06-10. To obtain a new or tweaked
|
||||
version of this certificate in the future, simply run certbot
|
||||
again. To non-interactively renew *all* of your certificates, run
|
||||
"certbot renew"
|
||||
- Your account credentials have been saved in your Certbot
|
||||
configuration directory at /etc/letsencrypt. You should make a
|
||||
secure backup of this folder now. This configuration directory will
|
||||
also contain certificates and private keys obtained by Certbot so
|
||||
making regular backups of this folder is ideal.
|
||||
- If you like Certbot, please consider supporting our work by:
|
||||
|
||||
Donating to ISRG / Let's Encrypt: https://letsencrypt.org/donate
|
||||
Donating to EFF: https://eff.org/donate-le
|
||||
|
||||
|
||||
=-=-=-=-=-=-=-=-
|
||||
|
||||
|
||||
GRAFANA / INFLUXDB / DOCKER
|
||||
|
||||
Container to run the whole shebang:
|
||||
|
||||
- https://github.com/philhawthorne/docker-influxdb-grafana
|
||||
docker run -d \
|
||||
--name docker-influxdb-grafana \
|
||||
-p 3003:3003 \
|
||||
-p 3004:8083 \
|
||||
-p 8086:8086 \
|
||||
-p 22022:22 \
|
||||
-v /path/for/influxdb:/var/lib/influxdb \
|
||||
-v /path/for/grafana:/var/lib/grafana \
|
||||
philhawthorne/docker-influxdb-grafana:latest
|
||||
|
||||
NOTE: you can leave out the paths and it works and the name is a little verbose
|
||||
|
||||
Dashboard for Grafana and app.metrics:
|
||||
- https://grafana.com/dashboards/2125
|
||||
|
||||
|
||||
2959
dist/assets/grafana/AyaNova metrics-1525464233568.json
vendored
Normal file
4
dist/docker/linux-x64/ayanovadocker/dockerfile
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
FROM microsoft/dotnet:2.1-aspnetcore-runtime
|
||||
WORKDIR /app
|
||||
COPY ./files .
|
||||
ENTRYPOINT ["dotnet", "AyaNova.dll"]
|
||||
52
dist/docker/linux-x64/docker-compose.yml
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
|
||||
metrics:
|
||||
image: philhawthorne/docker-influxdb-grafana:latest
|
||||
restart: always
|
||||
ports:
|
||||
- "3003:3003"
|
||||
- "3004:8083"
|
||||
- "8086:8086"
|
||||
- "22022:22"
|
||||
|
||||
postgresserver:
|
||||
image: postgres:alpine
|
||||
restart: always
|
||||
ports:
|
||||
- 5432:5432
|
||||
environment:
|
||||
POSTGRES_PASSWORD: letmein
|
||||
volumes:
|
||||
- /var/lib/ayanova/db:/var/lib/postgresql/data
|
||||
|
||||
ayanova:
|
||||
image: gztw/ayanova:v8.0.0
|
||||
restart: always
|
||||
ports:
|
||||
- 7575:7575
|
||||
volumes:
|
||||
- /var/lib:/var/lib
|
||||
environment:
|
||||
AYANOVA_USE_URLS: http://*:7575
|
||||
AYANOVA_DB_CONNECTION: User ID=postgres;Password=letmein;Host=postgresserver;Port=5432;Database=AyaNova;Pooling=true;
|
||||
AYANOVA_FOLDER_USER_FILES: /var/lib/ayanova/files/user
|
||||
AYANOVA_FOLDER_BACKUP_FILES: /var/lib/ayanova/files/backup
|
||||
AYANOVA_LOG_PATH: /var/lib/ayanova
|
||||
AYANOVA_LOG_LEVEL: Info
|
||||
AYANOVA_METRICS_USE_INFLUXDB: "true"
|
||||
AYANOVA_METRICS_INFLUXDB_BASEURL: http://metrics:8086
|
||||
# AYANOVA_PERMANENTLY_ERASE_DATABASE: "true"
|
||||
build:
|
||||
context: ./ayanovadocker
|
||||
dockerfile: Dockerfile
|
||||
links:
|
||||
- postgresserver
|
||||
depends_on:
|
||||
- "postgresserver"
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker-network
|
||||
41
dist/docker/linux-x64/docker-compose.yml.original.b4.metrics
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
|
||||
postgresserver:
|
||||
image: postgres:alpine
|
||||
restart: always
|
||||
ports:
|
||||
- 5432:5432
|
||||
environment:
|
||||
POSTGRES_PASSWORD: letmein
|
||||
volumes:
|
||||
- /var/lib/ayanova/db:/var/lib/postgresql/data
|
||||
|
||||
ayanova:
|
||||
image: gztw/ayanova:v8.0.0
|
||||
restart: always
|
||||
ports:
|
||||
- 7575:7575
|
||||
volumes:
|
||||
- /var/lib:/var/lib
|
||||
environment:
|
||||
AYANOVA_USE_URLS: http://*:7575
|
||||
AYANOVA_DB_CONNECTION: User ID=postgres;Password=letmein;Host=postgresserver;Port=5432;Database=AyaNova;Pooling=true;
|
||||
AYANOVA_FOLDER_USER_FILES: /var/lib/ayanova/files/user
|
||||
AYANOVA_FOLDER_BACKUP_FILES: /var/lib/ayanova/files/backup
|
||||
AYANOVA_LOG_PATH: /var/lib/ayanova
|
||||
AYANOVA_LOG_LEVEL: Info
|
||||
# AYANOVA_PERMANENTLY_ERASE_DATABASE: "true"
|
||||
build:
|
||||
context: ./ayanovadocker
|
||||
dockerfile: Dockerfile
|
||||
links:
|
||||
- postgresserver
|
||||
depends_on:
|
||||
- "postgresserver"
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker-network
|
||||
18
dist/docker/linux-x64/host/docker-nginx-ayanova-sample-config/letsencrypt/docker-compose.yml
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
version: '3.1'
|
||||
|
||||
services:
|
||||
|
||||
letsencrypt-nginx-container:
|
||||
container_name: 'letsencrypt-nginx-container'
|
||||
image: nginx:latest
|
||||
ports:
|
||||
- "80:80"
|
||||
volumes:
|
||||
- ./nginx.conf:/etc/nginx/conf.d/default.conf
|
||||
- ./letsencrypt-site:/usr/share/nginx/html
|
||||
networks:
|
||||
- docker-network
|
||||
|
||||
networks:
|
||||
docker-network:
|
||||
driver: bridge
|
||||
@@ -0,0 +1,14 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<title>Let's Encrypt First Time Cert Issue Site</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Hello world</h1>
|
||||
<p>
|
||||
This is the temporary site that will only be used for the very first time SSL certificates are issued by Let's Encrypt's
|
||||
certbot.
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
||||
13
dist/docker/linux-x64/host/docker-nginx-ayanova-sample-config/letsencrypt/nginx.conf
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
server {
|
||||
listen 80;
|
||||
listen [::]:80;
|
||||
server_name helloayanova.com www.helloayanova.com v8.helloayanova.com test.helloayanova.com;
|
||||
|
||||
location ~ /.well-known/acme-challenge {
|
||||
allow all;
|
||||
root /usr/share/nginx/html;
|
||||
}
|
||||
|
||||
root /usr/share/nginx/html;
|
||||
index index.html;
|
||||
}
|
||||
@@ -0,0 +1,8 @@
|
||||
-----BEGIN DH PARAMETERS-----
|
||||
MIIBCAKCAQEA2wcsrWmfQbGC0V8eW14YPtYA1jt2dNeqV6B7Z/w0GnrwjL+xuYhG
|
||||
LzDhQuJvhEsDFCd//roBXWOFOZdAR0otkcxaQ+AaP0z/0UsC8NWGnM1G6q4fBju/
|
||||
y9e+dqjybyHIX10FtTj/gKV8lBcWJIw7cMmlAShj6xfd1zPPehNswLiRrWHusL/E
|
||||
5GkV/x4U76KbViqqTqrV5J6dmnxaNk4s8AphGvqeu/UrewjVf8C+fl6hljICUayJ
|
||||
WzHd5Ss/CASPRk91nnhcP9r3XZNyuPkyxmJrlZVElsC94T5Chnth+uix4TpBV/2P
|
||||
0Ax8sCLPVlw9Op7Bu7fJ+QJ5gbVk9n93mwIBAg==
|
||||
-----END DH PARAMETERS-----
|
||||
22
dist/docker/linux-x64/host/docker-nginx-ayanova-sample-config/production/docker-compose.yml
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
version: '3.1'
|
||||
|
||||
services:
|
||||
|
||||
production-nginx-container:
|
||||
container_name: 'production-nginx-container'
|
||||
image: nginx:latest
|
||||
restart: always
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
volumes:
|
||||
- ./production.conf:/etc/nginx/conf.d/default.conf
|
||||
- ./production-site:/usr/share/nginx/html
|
||||
- ./dh-param/dhparam-2048.pem:/etc/ssl/certs/dhparam-2048.pem
|
||||
- /docker-volumes/etc/letsencrypt/live/helloayanova.com/fullchain.pem:/etc/letsencrypt/live/helloayanova.com/fullchain.pem
|
||||
- /docker-volumes/etc/letsencrypt/live/helloayanova.com/privkey.pem:/etc/letsencrypt/live/helloayanova.com/privkey.pem
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: docker-network
|
||||
@@ -0,0 +1,13 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<title>HelloAyaNova</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Hello AyaNova</h1>
|
||||
<p>
|
||||
Test site
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
||||
142
dist/docker/linux-x64/host/docker-nginx-ayanova-sample-config/production/production.conf
vendored
Normal file
@@ -0,0 +1,142 @@
|
||||
server {
|
||||
listen 80;
|
||||
listen [::]:80;
|
||||
server_name helloayanova.com www.helloayanova.com;
|
||||
location ^~ /.well-known/acme-challenge {
|
||||
root /usr/share/nginx/html;
|
||||
default_type text/plain;
|
||||
allow all;
|
||||
}
|
||||
location / {
|
||||
rewrite ^ https://$host$request_uri? permanent;
|
||||
}
|
||||
}
|
||||
#https://helloayanova.com
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
listen [::]:443 ssl http2;
|
||||
server_name helloayanova.com;
|
||||
server_tokens off;
|
||||
ssl_certificate /etc/letsencrypt/live/helloayanova.com/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/helloayanova.com/privkey.pem;
|
||||
ssl_buffer_size 8k;
|
||||
ssl_dhparam /etc/ssl/certs/dhparam-2048.pem;
|
||||
ssl_protocols TLSv1.2 TLSv1.1 TLSv1;
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_ciphers ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5;
|
||||
ssl_ecdh_curve secp384r1;
|
||||
ssl_session_tickets off;
|
||||
# OCSP stapling
|
||||
ssl_stapling on;
|
||||
ssl_stapling_verify on;
|
||||
resolver 8.8.8.8;
|
||||
location ^~ /.well-known/acme-challenge {
|
||||
root /usr/share/nginx/html;
|
||||
default_type text/plain;
|
||||
allow all;
|
||||
}
|
||||
return 301 https://www.helloayanova.com$request_uri;
|
||||
}
|
||||
#https://www.helloayanova.com
|
||||
#This is the "web" server for static files outside of AyaNova app server
|
||||
server {
|
||||
server_name www.helloayanova.com;
|
||||
listen 443 ssl http2;
|
||||
listen [::]:443 ssl http2;
|
||||
server_tokens off;
|
||||
ssl on;
|
||||
ssl_buffer_size 8k;
|
||||
ssl_dhparam /etc/ssl/certs/dhparam-2048.pem;
|
||||
ssl_protocols TLSv1.2 TLSv1.1 TLSv1;
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_ciphers ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5;
|
||||
ssl_ecdh_curve secp384r1;
|
||||
ssl_session_tickets off;
|
||||
# OCSP stapling
|
||||
ssl_stapling on;
|
||||
ssl_stapling_verify on;
|
||||
resolver 8.8.8.8 8.8.4.4;
|
||||
ssl_certificate /etc/letsencrypt/live/helloayanova.com/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/helloayanova.com/privkey.pem;
|
||||
location ^~ /.well-known/acme-challenge {
|
||||
root /usr/share/nginx/html;
|
||||
default_type text/plain;
|
||||
allow all;
|
||||
}
|
||||
|
||||
|
||||
location / {
|
||||
#security headers
|
||||
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload";
|
||||
add_header X-XSS-Protection "1; mode=block" always;
|
||||
add_header X-Content-Type-Options "nosniff" always;
|
||||
add_header X-Frame-Options "DENY" always;
|
||||
#CSP
|
||||
add_header Content-Security-Policy "frame-src 'self'; default-src 'self'; script-src 'self' 'unsafe-inline' https://maxcdn.bootstrapcdn.com https://ajax.googleapis.com; img-src 'self'; style-src 'self' https://maxcdn.bootstrapcdn.com; font-src 'self' data: https://maxcdn.bootstrapcdn.com; form-action 'self'; upgrade-insecure-requests;" always;
|
||||
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
|
||||
}
|
||||
|
||||
|
||||
root /usr/share/nginx/html;
|
||||
index index.html;
|
||||
}
|
||||
|
||||
#https://v8.helloayanova.com, https://test.helloayanova.com helloayanova
|
||||
server {
|
||||
server_name test.helloayanova.com v8.helloayanova.com;
|
||||
listen 443 ssl http2;
|
||||
listen [::]:443 ssl http2;
|
||||
server_tokens off;
|
||||
ssl on;
|
||||
ssl_buffer_size 8k;
|
||||
ssl_dhparam /etc/ssl/certs/dhparam-2048.pem;
|
||||
ssl_protocols TLSv1.2 TLSv1.1 TLSv1;
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_ciphers ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5;
|
||||
ssl_ecdh_curve secp384r1;
|
||||
ssl_session_tickets off;
|
||||
# OCSP stapling
|
||||
ssl_stapling on;
|
||||
ssl_stapling_verify on;
|
||||
resolver 8.8.8.8 8.8.4.4;
|
||||
ssl_certificate /etc/letsencrypt/live/helloayanova.com/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/helloayanova.com/privkey.pem;
|
||||
location ^~ /.well-known/acme-challenge {
|
||||
root /usr/share/nginx/html;
|
||||
default_type text/plain;
|
||||
allow all;
|
||||
}
|
||||
|
||||
location / {
|
||||
#security headers
|
||||
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload";
|
||||
add_header X-XSS-Protection "1; mode=block" always;
|
||||
add_header X-Content-Type-Options "nosniff" always;
|
||||
add_header X-Frame-Options "DENY" always;
|
||||
|
||||
#CSP
|
||||
#https://developers.google.com/web/fundamentals/security/csp/
|
||||
add_header Content-Security-Policy "frame-src 'self'; default-src 'self'; script-src 'self' 'unsafe-inline' https://apis.google.com; img-src 'self' data:; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com/; font-src 'self' https://fonts.googleapis.com/ https://fonts.gstatic.com; form-action 'self'; upgrade-insecure-requests;" always;
|
||||
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
|
||||
|
||||
#This is "ayanova" because it's the docker network and port
|
||||
proxy_pass http://ayanova:7575;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "";
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_cache_bypass $http_upgrade;
|
||||
|
||||
#These timeouts are only required for large trial data generation which should be re-coded to start the process and return immediately
|
||||
#AS of alpha-4 large data generation on D.O. takes 1'04'' so setting these to 3 minutes as a safe margin
|
||||
|
||||
proxy_connect_timeout 180;
|
||||
proxy_send_timeout 180;
|
||||
proxy_read_timeout 180;
|
||||
send_timeout 180;
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
6
dist/docker/linux-x64/restartnginx.sh
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
#!/bin/bash
|
||||
cd /docker/letsencrypt-docker-nginx/src/production
|
||||
docker-compose down
|
||||
docker-compose up -d
|
||||
#docker start dock-pg10 dock-pgadmin
|
||||
#/docker/letsencrypt-docker-nginx/src/production run sudo docker-compose up -d
|
||||
254
docs/8.0/ayanova/docs/_placeholder.md
Normal file
@@ -0,0 +1,254 @@
|
||||
# Placeholder
|
||||
|
||||
This is a placeholder page for sections that are not written yet
|
||||
|
||||
#STANDARDS FOR AYANOVA DOCS
|
||||
|
||||
All one or two # headings are all caps, three or more #'s are regular sentence case.
|
||||
|
||||
|
||||
|
||||
## Body copy
|
||||
|
||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras arcu libero,
|
||||
mollis sed massa vel, *ornare viverra ex*. Mauris a ullamcorper lacus. Nullam
|
||||
urna elit, malesuada eget finibus ut, ullamcorper ac tortor. Vestibulum sodales
|
||||
pulvinar nisl, pharetra aliquet est. Quisque volutpat erat ac nisi accumsan
|
||||
tempor.
|
||||
|
||||
**Sed suscipit**, orci non pretium pretium, quam mi gravida metus, vel
|
||||
venenatis justo est condimentum diam. Maecenas non ornare justo. Nam a ipsum
|
||||
eros. [Nulla aliquam](/) orci sit amet nisl posuere malesuada. Proin aliquet
|
||||
nulla velit, quis ultricies orci feugiat et. `Ut tincidunt sollicitudin`
|
||||
tincidunt. Aenean ullamcorper sit amet nulla at interdum.
|
||||
|
||||
## Headings
|
||||
|
||||
### The 3rd level
|
||||
|
||||
#### The 4th level
|
||||
|
||||
##### The 5th level
|
||||
|
||||
###### The 6th level
|
||||
|
||||
## Headings <small>with secondary text</small>
|
||||
|
||||
### The 3rd level <small>with secondary text</small>
|
||||
|
||||
#### The 4th level <small>with secondary text</small>
|
||||
|
||||
##### The 5th level <small>with secondary text</small>
|
||||
|
||||
###### The 6th level <small>with secondary text</small>
|
||||
|
||||
## Blockquotes
|
||||
|
||||
> Morbi eget dapibus felis. Vivamus venenatis porttitor tortor sit amet rutrum.
|
||||
Pellentesque aliquet quam enim, eu volutpat urna rutrum a. Nam vehicula nunc
|
||||
mauris, a ultricies libero efficitur sed. *Class aptent* taciti sociosqu ad
|
||||
litora torquent per conubia nostra, per inceptos himenaeos. Sed molestie
|
||||
imperdiet consectetur.
|
||||
|
||||
### Blockquote nesting
|
||||
|
||||
> **Sed aliquet**, neque at rutrum mollis, neque nisi tincidunt nibh, vitae
|
||||
faucibus lacus nunc at lacus. Nunc scelerisque, quam id cursus sodales, lorem
|
||||
[libero fermentum](/) urna, ut efficitur elit ligula et nunc.
|
||||
|
||||
> > Mauris dictum mi lacus, sit amet pellentesque urna vehicula fringilla.
|
||||
Ut sit amet placerat ante. Proin sed elementum nulla. Nunc vitae sem odio.
|
||||
Suspendisse ac eros arcu. Vivamus orci erat, volutpat a tempor et, rutrum.
|
||||
eu odio.
|
||||
|
||||
> > > `Suspendisse rutrum facilisis risus`, eu posuere neque commodo a.
|
||||
Interdum et malesuada fames ac ante ipsum primis in faucibus. Sed nec leo
|
||||
bibendum, sodales mauris ut, tincidunt massa.
|
||||
|
||||
### Other content blocks
|
||||
|
||||
> Vestibulum vitae orci quis ante viverra ultricies ut eget turpis. Sed eu
|
||||
lectus dapibus, eleifend nulla varius, lobortis turpis. In ac hendrerit nisl,
|
||||
sit amet laoreet nibh.
|
||||
``` js hl_lines="8"
|
||||
var _extends = function(target) {
|
||||
for (var i = 1; i < arguments.length; i++) {
|
||||
var source = arguments[i];
|
||||
for (var key in source) {
|
||||
target[key] = source[key];
|
||||
}
|
||||
}
|
||||
return target;
|
||||
};
|
||||
```
|
||||
|
||||
> > Praesent at `:::js return target`, sodales nibh vel, tempor felis. Fusce
|
||||
vel lacinia lacus. Suspendisse rhoncus nunc non nisi iaculis ultrices.
|
||||
Donec consectetur mauris non neque imperdiet, eget volutpat libero.
|
||||
|
||||
## Lists
|
||||
|
||||
### Unordered lists
|
||||
|
||||
* Sed sagittis eleifend rutrum. Donec vitae suscipit est. Nullam tempus tellus
|
||||
non sem sollicitudin, quis rutrum leo facilisis. Nulla tempor lobortis orci,
|
||||
at elementum urna sodales vitae. In in vehicula nulla, quis ornare libero.
|
||||
|
||||
* Duis mollis est eget nibh volutpat, fermentum aliquet dui mollis.
|
||||
* Nam vulputate tincidunt fringilla.
|
||||
* Nullam dignissim ultrices urna non auctor.
|
||||
|
||||
* Aliquam metus eros, pretium sed nulla venenatis, faucibus auctor ex. Proin ut
|
||||
eros sed sapien ullamcorper consequat. Nunc ligula ante, fringilla at aliquam
|
||||
ac, aliquet sed mauris.
|
||||
|
||||
* Nulla et rhoncus turpis. Mauris ultricies elementum leo. Duis efficitur
|
||||
accumsan nibh eu mattis. Vivamus tempus velit eros, porttitor placerat nibh
|
||||
lacinia sed. Aenean in finibus diam.
|
||||
|
||||
### Ordered lists
|
||||
|
||||
1. Integer vehicula feugiat magna, a mollis tellus. Nam mollis ex ante, quis
|
||||
elementum eros tempor rutrum. Aenean efficitur lobortis lacinia. Nulla
|
||||
consectetur feugiat sodales.
|
||||
|
||||
2. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur
|
||||
ridiculus mus. Aliquam ornare feugiat quam et egestas. Nunc id erat et quam
|
||||
pellentesque lacinia eu vel odio.
|
||||
|
||||
1. Vivamus venenatis porttitor tortor sit amet rutrum. Pellentesque aliquet
|
||||
quam enim, eu volutpat urna rutrum a. Nam vehicula nunc mauris, a
|
||||
ultricies libero efficitur sed.
|
||||
|
||||
1. Mauris dictum mi lacus
|
||||
2. Ut sit amet placerat ante
|
||||
3. Suspendisse ac eros arcu
|
||||
|
||||
2. Morbi eget dapibus felis. Vivamus venenatis porttitor tortor sit amet
|
||||
rutrum. Pellentesque aliquet quam enim, eu volutpat urna rutrum a. Sed
|
||||
aliquet, neque at rutrum mollis, neque nisi tincidunt nibh.
|
||||
|
||||
3. Pellentesque eget `:::js var _extends` ornare tellus, ut gravida mi.
|
||||
``` js hl_lines="1"
|
||||
var _extends = function(target) {
|
||||
for (var i = 1; i < arguments.length; i++) {
|
||||
var source = arguments[i];
|
||||
for (var key in source) {
|
||||
target[key] = source[key];
|
||||
}
|
||||
}
|
||||
return target;
|
||||
};
|
||||
```
|
||||
|
||||
3. Vivamus id mi enim. Integer id turpis sapien. Ut condimentum lobortis
|
||||
sagittis. Aliquam purus tellus, faucibus eget urna at, iaculis venenatis
|
||||
nulla. Vivamus a pharetra leo.
|
||||
|
||||
### Definition lists
|
||||
|
||||
Lorem ipsum dolor sit amet
|
||||
|
||||
: Sed sagittis eleifend rutrum. Donec vitae suscipit est. Nullam tempus
|
||||
tellus non sem sollicitudin, quis rutrum leo facilisis. Nulla tempor
|
||||
lobortis orci, at elementum urna sodales vitae. In in vehicula nulla.
|
||||
|
||||
Duis mollis est eget nibh volutpat, fermentum aliquet dui mollis.
|
||||
Nam vulputate tincidunt fringilla.
|
||||
Nullam dignissim ultrices urna non auctor.
|
||||
|
||||
Cras arcu libero
|
||||
|
||||
: Aliquam metus eros, pretium sed nulla venenatis, faucibus auctor ex. Proin
|
||||
ut eros sed sapien ullamcorper consequat. Nunc ligula ante, fringilla at
|
||||
aliquam ac, aliquet sed mauris.
|
||||
|
||||
## Code blocks
|
||||
|
||||
### Inline
|
||||
|
||||
Morbi eget `dapibus felis`. Vivamus *`venenatis porttitor`* tortor sit amet
|
||||
rutrum. Class aptent taciti sociosqu ad litora torquent per conubia nostra,
|
||||
per inceptos himenaeos. [`Pellentesque aliquet quam enim`](/), eu volutpat urna
|
||||
rutrum a.
|
||||
|
||||
Nam vehicula nunc `:::js return target` mauris, a ultricies libero efficitur
|
||||
sed. Sed molestie imperdiet consectetur. Vivamus a pharetra leo. Pellentesque
|
||||
eget ornare tellus, ut gravida mi. Fusce vel lacinia lacus.
|
||||
|
||||
### Listing
|
||||
|
||||
#!js hl_lines="8"
|
||||
var _extends = function(target) {
|
||||
for (var i = 1; i < arguments.length; i++) {
|
||||
var source = arguments[i];
|
||||
for (var key in source) {
|
||||
target[key] = source[key];
|
||||
}
|
||||
}
|
||||
return target;
|
||||
};
|
||||
|
||||
## Horizontal rules
|
||||
|
||||
Aenean in finibus diam. Duis mollis est eget nibh volutpat, fermentum aliquet
|
||||
dui mollis. Nam vulputate tincidunt fringilla. Nullam dignissim ultrices urna
|
||||
non auctor.
|
||||
|
||||
***
|
||||
|
||||
Integer vehicula feugiat magna, a mollis tellus. Nam mollis ex ante, quis
|
||||
elementum eros tempor rutrum. Aenean efficitur lobortis lacinia. Nulla
|
||||
consectetur feugiat sodales.
|
||||
|
||||
## Data tables
|
||||
|
||||
| Sollicitudo / Pellentesi | consectetur | adipiscing | elit | arcu | sed |
|
||||
| ------------------------ | ----------- | ---------- | ------- | ---- | --- |
|
||||
| Vivamus a pharetra | yes | yes | yes | yes | yes |
|
||||
| Ornare viverra ex | yes | yes | yes | yes | yes |
|
||||
| Mauris a ullamcorper | yes | yes | partial | yes | yes |
|
||||
| Nullam urna elit | yes | yes | yes | yes | yes |
|
||||
| Malesuada eget finibus | yes | yes | yes | yes | yes |
|
||||
| Ullamcorper | yes | yes | yes | yes | yes |
|
||||
| Vestibulum sodales | yes | - | yes | - | yes |
|
||||
| Pulvinar nisl | yes | yes | yes | - | - |
|
||||
| Pharetra aliquet est | yes | yes | yes | yes | yes |
|
||||
| Sed suscipit | yes | yes | yes | yes | yes |
|
||||
| Orci non pretium | yes | partial | - | - | - |
|
||||
|
||||
Sed sagittis eleifend rutrum. Donec vitae suscipit est. Nullam tempus tellus
|
||||
non sem sollicitudin, quis rutrum leo facilisis. Nulla tempor lobortis orci,
|
||||
at elementum urna sodales vitae. In in vehicula nulla, quis ornare libero.
|
||||
|
||||
| Left | Center | Right |
|
||||
| :--------- | :------: | ------: |
|
||||
| Lorem | *dolor* | `amet` |
|
||||
| [ipsum](/) | **sit** | |
|
||||
|
||||
Vestibulum vitae orci quis ante viverra ultricies ut eget turpis. Sed eu
|
||||
lectus dapibus, eleifend nulla varius, lobortis turpis. In ac hendrerit nisl,
|
||||
sit amet laoreet nibh.
|
||||
|
||||
<table>
|
||||
<colgroup>
|
||||
<col width="30%">
|
||||
<col width="70%">
|
||||
</colgroup>
|
||||
<thead>
|
||||
<tr class="header">
|
||||
<th>Table</th>
|
||||
<th>with colgroups (Pandoc)</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>Lorem</td>
|
||||
<td>ipsum dolor sit amet.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Sed sagittis</td>
|
||||
<td>eleifend rutrum. Donec vitae suscipit est.</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
37
docs/8.0/ayanova/docs/api-console.md
Normal file
@@ -0,0 +1,37 @@
|
||||
# API EXPLORER CONSOLE
|
||||
|
||||
The AyaNova server uses [Swagger-ui](https://www.swagger.io) to provide an interactive live api explorer and documentation console for developers to learn about and experiment with the AyaNova REST API.
|
||||
|
||||
You can access the api explorer console by navigating with your browser to this path on your AyaNova API server:
|
||||
`/api-docs/`
|
||||
|
||||
For example if your AyaNova server were located on port 7575 of the local computer you would connect to it via this url:
|
||||
`http://localhost:7575/api-docs/`
|
||||
|
||||
## Authentication
|
||||
|
||||
Most of the API endpoints in AyaNova require authentication to use them. The API console supports the ability to set an authorization token so you can fully test all routes.
|
||||
|
||||
To obtain a token expand the "Auth" route in the main console and enter a value for login and password and click on the "Try it out" button to obtain an API token.
|
||||
|
||||
The "response body" section will contain the return value, something similar to this:
|
||||
|
||||
``` hl_lines="5"
|
||||
{
|
||||
"ok": 1,
|
||||
"issued": 1518034370,
|
||||
"expires": 1520626370,
|
||||
"token": "xyGhbGciOiJIUzI1NiIsInR4cCI6IkpXVCJ9.utJpYXQ4OiIxNqE4MDM0MzcfIiwiZXhwjjoiMTUyMDYyNjM8MCIsImlocyI0IkF53U5vdmEiLCJpZCI6IjEifQ.z7QaHKt2VbcysunIvsfa-51X7owB1EYcyhpkdkfaqzy",
|
||||
"id": 1
|
||||
}
|
||||
```
|
||||
|
||||
The highlighted line above contains the token you require, copy the token value not including the quotation marks. This is your access token.
|
||||
|
||||
Click on the "Authorize" button at the top of the API console and a popup dialog box will open. In the "Value" box the dialog enter the word Bearer followed by a space and then your api token, for example using the above you would paste in:
|
||||
|
||||
`Bearer xyGhbGciOiJIUzI1NiIsInR4cCI6IkpXVCJ9.utJpYXQ4OiIxNqE4MDM0MzcfIiwiZXhwjjoiMTUyMDYyNjM8MCIsImlocyI0IkF53U5vdmEiLCJpZCI6IjEifQ.z7QaHKt2VbcysunIvsfa-51X7owB1EYcyhpkdkfaqzy`
|
||||
|
||||
then click on the "Authorize" button inside the popup dialog box.
|
||||
|
||||
You have now saved your credentials (until you close or reload this browser window) and can access any of the API endpoints in this test console you have permission to access with the credentials you supplied earlier.
|
||||
25
docs/8.0/ayanova/docs/api-error-codes.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# API ERROR CODES
|
||||
|
||||
The AyaNova API will return an [error response](api-response-format.md#error-responses) when an error condition arises.
|
||||
|
||||
All API error codes are numbers between 2000 and 3000 and are intended to be consumed by software clients or for reference purposes for developers.
|
||||
|
||||
API error codes are different from [server error codes](ops-error-codes.md) which are intended for AyaNova system operators and related only to the running of the server itself.
|
||||
|
||||
Here are all the API level error codes that can be returned by the API server:
|
||||
|
||||
| CODE | MEANING |
|
||||
| ----- | ------------------------------ |
|
||||
| 2000 | API closed - Server is running but access to the API has been closed to all users |
|
||||
| 2001 | API closed all non OPS routes - Server is running but access to the API has been restricted to only server maintenance operations related functionality |
|
||||
| 2002 | Internal error from the API server, details in [server log](common-log.md) file |
|
||||
| 2003 | Authentication failed, bad login or password, user not found |
|
||||
| 2004 | Not authorized - current user is not authorized for operation attempted on the resource (insufficient rights) |
|
||||
| 2005 | Object was changed by another user since retrieval (concurrency token mismatch) |
|
||||
| 2010 | Object not found - API could not find the object requested |
|
||||
| 2020 | PUT Id mismatch - object Id does not match route Id |
|
||||
| 2030 | Invalid operation - operation could not be completed, not valid, details in message property |
|
||||
| 2200 | Validation error - general top level indicating object was not valid, specifics in "details" property |
|
||||
| 2201 | Validation error - Field is required but is empty or null |
|
||||
| 2202 | Validation error - Field length exceeded |
|
||||
| 2203 | Validation error - invalid value |
|
||||
3
docs/8.0/ayanova/docs/api-intro.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# DEVELOPERS API
|
||||
|
||||
AyaNova REST API for developers
|
||||
8
docs/8.0/ayanova/docs/api-request-format.md
Normal file
@@ -0,0 +1,8 @@
|
||||
# API request format
|
||||
|
||||
AyaNova uses a RESTful API and supports the [JSON](https://www.json.org/) data interchange format exclusively.
|
||||
No other data formats are supported, your code must supply and consume JSON formatted data.
|
||||
|
||||
All developer interaction with the AyaNova API is via the REST server interface only.
|
||||
|
||||
**TODO FILL THIS OUT**
|
||||
222
docs/8.0/ayanova/docs/api-response-format.md
Normal file
@@ -0,0 +1,222 @@
|
||||
# API response format
|
||||
|
||||
AyaNova uses a RESTful API and supports the [JSON](https://www.json.org/) data interchange format exclusively.
|
||||
No other data formats are supported, your code must supply and consume JSON formatted data.
|
||||
|
||||
All developer interaction with the AyaNova API is via the REST server interface only.
|
||||
|
||||
## Successful responses
|
||||
|
||||
### GET RESPONSE
|
||||
|
||||
All successful GET responses have a standard format:
|
||||
|
||||
```json
|
||||
{
|
||||
"Result": {
|
||||
"id": 150,
|
||||
"name": "Handmade Rubber Pizza",
|
||||
...etc...
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The results of the response are always contained in the `result` property and could be a single object, a collection or in some cases nothing at all.
|
||||
HTTP Status Code is set in the header.
|
||||
|
||||
### GET COLLECTION RESPONSE
|
||||
|
||||
In the case of a collection most routes support paging, here is an example paged collection request and response:
|
||||
|
||||
Request (note the `offset` and `limit` parameters):
|
||||
|
||||
`http://localhost:3000/api/v8.0/Widget?Offset=2&Limit=3`
|
||||
|
||||
Limit must be a value between 1 and 100.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
{
|
||||
"result": [
|
||||
...collection...
|
||||
],
|
||||
"paging": {
|
||||
"count": 2000,
|
||||
"offset": 2,
|
||||
"limit": 3,
|
||||
"first": "http://localhost:3000/api/v8.0/Widget?pageNo=1&pageSize=3",
|
||||
"previous": "http://localhost:3000/api/v8.0/Widget?pageNo=1&pageSize=3",
|
||||
"next": "http://localhost:3000/api/v8.0/Widget?pageNo=3&pageSize=3",
|
||||
"last": "http://localhost:3000/api/v8.0/Widget?pageNo=667&pageSize=3"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
`Previous` or `next` properties will contain "null" instead of a URL on boundaries where there is no record to link to.
|
||||
|
||||
### PUT RESPONSE
|
||||
|
||||
A successful PUT response does not return any data but returns HTTP status code 204 (no content) in the header.
|
||||
|
||||
**WARNING:** Be careful using PUT, you must provide **all** properties or any properties left out will be removed at the server. If you are updating a subset of properties use PATCH instead to save bandwidth.
|
||||
|
||||
### PATCH RESPONSE
|
||||
|
||||
Use PATCH to update specific properties only.
|
||||
|
||||
A successful PATCH response does not return any data but returns HTTP status code 204 (no content) in the header.
|
||||
Patches must conform to the [JSONPATCH](http://jsonpatch.com/) standard.
|
||||
|
||||
### POST RESPONSE
|
||||
|
||||
A successful POST response contains the object posted with its Id value set and the HTTP status code of 201 (created).
|
||||
|
||||
### DELETE RESPONSE
|
||||
|
||||
A successful DELETE response does not return any data but returns HTTP status code 204 (no content) in the header.
|
||||
|
||||
## Error responses
|
||||
|
||||
### Fundamental errors
|
||||
|
||||
Fundamental, basic errors return a header status code only and are generally self-explanatory. For example if you attempt to use XML formatted data with the API you will receive an error response consisting only of the header 415 (unsupported media type).
|
||||
|
||||
401
|
||||
In cases where authentication fails you will receive an empty body response with the header 401 (unauthorized) returned.
|
||||
The details of what was wrong are contained in the header, here is an example of an invalid JWT authentication token:
|
||||
|
||||
```json
|
||||
{
|
||||
"content-length": "0",
|
||||
"date": "Fri, 09 Mar 2018 16:46:07 GMT",
|
||||
"server": "Kestrel",
|
||||
"www-authenticate": "Bearer error=\"invalid_token\", error_description=\"The signature is invalid\"",
|
||||
"content-type": null
|
||||
}
|
||||
```
|
||||
|
||||
### Error response object
|
||||
|
||||
All error responses that return data have an `Error` object property at top level. The error object varies in the properties it contains depending on the error.
|
||||
|
||||
Here is the most minimal error response that returns data:
|
||||
|
||||
```json
|
||||
{
|
||||
"Error": {
|
||||
"Code": "2000",
|
||||
"Message": "Developer readable error message"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
An error object will always contain at minimum an [API error `Code`](api-error-codes.md) property for reference and a `message` property with descriptive text intended for developers.
|
||||
|
||||
### Validation error response object
|
||||
|
||||
Here is an example of a more detailed error response showing validation errors on a request:
|
||||
|
||||
```json hl_lines="4 "
|
||||
{
|
||||
"error": {
|
||||
"code": "2200",
|
||||
"details": [
|
||||
{
|
||||
"message": "255 max",
|
||||
"target": "Name",
|
||||
"error": "LengthExceeded"
|
||||
},
|
||||
{
|
||||
"target": "EndDate",
|
||||
"error": "RequiredPropertyEmpty"
|
||||
},
|
||||
{
|
||||
"target": "Roles",
|
||||
"error": "InvalidValue"
|
||||
}
|
||||
],
|
||||
"message": "Object did not pass validation"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The above example shows multiple validation errors ([API error code](api-error-codes.md) 2200) in several properties when attempting to post an object.
|
||||
|
||||
`details` outer property contains the collection of all validation errors.
|
||||
|
||||
`target` property shows the location of the error. The value of `target` is either a property name corresponding to the property that failed business rule validation or blank if the validation rule applies to the entire object in general.
|
||||
|
||||
`error` property contains the exact [validation error](api-validation-error-codes.md).
|
||||
|
||||
`message` property optionally contains further information of use to the developer, in the example above you can see that the name property has more than the maximum limit of 255 characters.
|
||||
|
||||
### Concurrency error response object
|
||||
|
||||
AyaNova uses "optimistic concurrency" tracking. This means a concurrency token needs to accompany most change (PUT, PATCH) routes.
|
||||
|
||||
Objects that require concurrency tokens to update are the objects that return a `ConcurrencyToken` property on a GET request.
|
||||
|
||||
Every update to an object results in a new concurrency token for that object.
|
||||
|
||||
In a concurrency error response ([API error code](api-error-codes.md) 2005) and header HTTP code 409 (Conflict) is returned if a user attempts to update a record that was changed by another user since it was retrieved (outdated concurrency token provided).
|
||||
|
||||
Here is an example:
|
||||
|
||||
```json
|
||||
{
|
||||
"error": {
|
||||
"code": "2005",
|
||||
"message": "Object was changed by another user since retrieval (concurrency token mismatch)"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Other errors response format
|
||||
|
||||
Errors not related to validation or concurrency may contain one or more nested `innerError` properties. Each nested `innererror` object represents a higher level of detail than its parent. When evaluating errors, clients MUST traverse through all of the nested `innererrors` and choose the deepest one that they understand.
|
||||
|
||||
Here is a sample error response with innererror set:
|
||||
|
||||
```json hl_lines="6 "
|
||||
{
|
||||
"error": {
|
||||
"code": "1005",
|
||||
"message": "Previous passwords may not be reused",
|
||||
"target": "password",
|
||||
"innererror": {
|
||||
"code": "1006",
|
||||
"innererror": {
|
||||
"code": "1007",
|
||||
"minLength": "6",
|
||||
"maxLength": "64",
|
||||
"characterTypes": ["lowerCase","upperCase","number","symbol"],
|
||||
"minDistinctCharacterTypes": "2",
|
||||
"innererror": {
|
||||
"code": "1008"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Note that the contents of the `innererror` property may vary and contain distinct properties appropriate to the specific error condition.
|
||||
|
||||
### Server internal errors
|
||||
|
||||
Internal server errors are returned with an HTTP Status Code of 500 and an error object as follows:
|
||||
|
||||
```json
|
||||
{
|
||||
"error": {
|
||||
"code": "2002",
|
||||
"message": "See server log for details",
|
||||
"target": "Server internal error"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
For security reasons no details of an internal server exception are returned, you must examine the [server log](common-log.md) to see the details.
|
||||
Generally this means the request triggered an unhandled exception which will be logged in detail to the log file.
|
||||
Please report any internal server errors (preferably with the log showing the exception details) to AyaNova support so we can look into it.
|
||||
78
docs/8.0/ayanova/docs/api-upload-routes.md
Normal file
@@ -0,0 +1,78 @@
|
||||
# API UPLOAD ROUTES
|
||||
|
||||
AyaNova has several API routes for uploading files.
|
||||
|
||||
These routes are all `POST` routes:
|
||||
|
||||
- `/api/v{version}/Attachment`
|
||||
- `/api/v{version}/ImportAyaNova7`
|
||||
- `/api/v{version}/Restore`
|
||||
|
||||
Upload routes are not testable from the API explorer.
|
||||
|
||||
Upload routes expect a form to be uploaded with file content disposition (multipart/form-data).
|
||||
|
||||
AyaNova will allow a maximum of 12gb per file upload for "Utility" routes such as restore and import routes.
|
||||
|
||||
User file routes such as attachments may have a smaller limit, see the User documentation section for those features for limit details.
|
||||
|
||||
Here is a sample minimal HTML form that works with AyaNova file routes:
|
||||
|
||||
```html
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<title></title>
|
||||
<script src="https://code.jquery.com/jquery-3.2.1.min.js"
|
||||
integrity="sha256-hwg4gsxgFZhOsEEamdOYGBf13FyQuiTwlAQgxVSNgt4="
|
||||
crossorigin="anonymous"></script>
|
||||
<script type="text/javascript">
|
||||
$(document).ready(function () {
|
||||
$("#upload").click(function (evt) {
|
||||
var fileUpload = $("#files").get(0);
|
||||
var files = fileUpload.files;
|
||||
var data = new FormData();
|
||||
for (var i = 0; i < files.length; i++) {
|
||||
data.append(files[i].name, files[i]);
|
||||
}
|
||||
|
||||
//Attachment upload route requires further form data to designate
|
||||
//the object being attached to by its type and id:
|
||||
//data.append('AttachToObjectType','2');
|
||||
//data.append('AttachToObjectId','200');
|
||||
|
||||
$.ajax({
|
||||
type: "POST",
|
||||
url: "http://yourserver:7575/api/v8.0/Attachment",
|
||||
headers: {
|
||||
Authorization: "Bearer JWTTokenHere"
|
||||
},
|
||||
contentType: false,
|
||||
processData: false,
|
||||
data: data,
|
||||
success: function (message) {
|
||||
alert("upload successful!");
|
||||
console.log(message);
|
||||
},
|
||||
error: function (error) {
|
||||
console.log(error);
|
||||
alert("There was an error uploading files!");
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
</script>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<form method="post" enctype="multipart/form-data">
|
||||
<input type="file" id="files" name="files" multiple />
|
||||
<input type="button" id="upload" value="Upload file(s)" />
|
||||
|
||||
</form>
|
||||
</body>
|
||||
</html>
|
||||
```
|
||||
14
docs/8.0/ayanova/docs/api-validation-error-codes.md
Normal file
@@ -0,0 +1,14 @@
|
||||
# API VALIDATION ERROR CODES
|
||||
|
||||
All the validation error codes that can be [returned](api-response-format.md) by the API server.
|
||||
In each case there may be more details in the `message` property where appropriate.
|
||||
|
||||
| CODE | MEANING |
|
||||
| ----- | ------------------------------ |
|
||||
| RequiredPropertyEmpty | Required property is missing or empty |
|
||||
| LengthExceeded | A text property has more characters than are allowed. The limit will be returned in the `message` property of the validation error |
|
||||
| NotUnique | A text property is required to be unique but an existing identical value was found in the database |
|
||||
| StartDateMustComeBeforeEndDate | When an object requires a start and end date the start date must be earlier than the end date |
|
||||
| InvalidValue | Generic error indicating an input object's property is not set correctly |
|
||||
| ReferentialIntegrity | Indicates modifying the object (usually a delete) will break the link to other records in the database. The other records need to be modified before continuing |
|
||||
| InvalidOperation | Indicates the operation is invalid, details provided in the `message` |
|
||||
69
docs/8.0/ayanova/docs/common-log.md
Normal file
@@ -0,0 +1,69 @@
|
||||
# LOGGING
|
||||
|
||||
AyaNova keeps a log of important events for troubleshooting purposes.
|
||||
|
||||
AyaNova logs to the file log-ayanova.txt.
|
||||
|
||||
Every Wednesday it archives the current log file to a numbered archive log file, for example log-ayanova-1.txt, log-ayanova-2.txt etc.
|
||||
Any log older than 4 weeks is deleted permanently; 4 total logs are kept, which means a total of one month of logs are kept at any given time.
|
||||
|
||||
## INFORMATION SECURITY AND PRIVACY
|
||||
|
||||
By design and policy no personally identifiable information is intentionally gathered into log files.
|
||||
|
||||
User logins are logged in an anonymized way:
|
||||
|
||||
* AyaNova anonymizes IP addresses by masking the final segment of the address
|
||||
* Passwords are not logged
|
||||
* Users are logged as their internal id number not their name
|
||||
|
||||
Sometimes 3rd party tools may log to the log file and we may need to restrict them to conform to our privacy policy. If you find any personally identifiable information in a log file please advise us immediately at [support@ayanova.com](mailto:support@ayanova.com).
|
||||
|
||||
## Log path
|
||||
|
||||
By default AyaNova logs to a "logs" folder situated below the folder where AyaNova is started.
|
||||
You can override this and set a custom log location by command line argument or by setting the "AYANOVA_LOG_PATH" environment variable.
|
||||
|
||||
Example command line log path parameter
|
||||
|
||||
`"dotnet run --AYANOVA_LOG_PATH=/home/gztw/Documents/temp/cmdlinelogs"`
|
||||
|
||||
If both a command line parameter and an environment variable are set the command line parameter takes precedence.
|
||||
|
||||
## Log level
|
||||
|
||||
AyaNova supports 6 levels of logging, the default level is "Info" which is a medium level and will log general operations and any errors or warnings that may arise.
|
||||
|
||||
**WARNING**
|
||||
AyaNova server will run ***extremely slowly*** when setting a log level lower than Info. A very large amount of information is logged at Debug or lower levels and each item logged takes time away from the normal server operations. Unless directed to by technical support or attempting to diagnose a specific problem, you should avoid setting a log level lower than "Info".
|
||||
|
||||
You can set the log level via environment variable or command line parameter "AYANOVA_LOG_LEVEL".
|
||||
|
||||
For example from the command line
|
||||
|
||||
`"dotnet run --AYANOVA_LOG_LEVEL=Info"`
|
||||
|
||||
Below are listed the accepted values for log level from highest severity to lowest. A level logs everything at that level and above.
|
||||
So, for example, "Trace" level will log the most and "Fatal" will log the least.
|
||||
|
||||
* `Fatal` - Critical errors that prevent AyaNova from running or force it to shut down. This setting results in the least amount of logging.
|
||||
* `Error` - Logs all of the above plus errors that AyaNova can recover from and continue to operate.
|
||||
* `Warn` - Logs all of the above plus issues that AyaNova can work around but should be looked into and are warnings to system operators.
|
||||
* `Info` - Default level. Logs all the above levels plus normal behavior in low level of detail. Basic general operations are logged like startup and shutdown, configuration changes etc.
|
||||
* `Debug` - Logs all the above plus every request to the server in detail and information about internal operations.
|
||||
* `Trace` - Logs all the above plus highest level detail of internal program code operations. Useful primarily to AyaNova technical support to troubleshoot a specific issue, but too detailed for normal purposes.
|
||||
|
||||
## Troubleshooting logging
|
||||
|
||||
If you are having issues with logging you can enable a logger diagnostic log with a command line parameter or environment variable.
|
||||
Enabling this setting will cause a log file named "log-ayanova-logger.txt" to be written to the folder AyaNova is started in.
|
||||
|
||||
Command line parameter
|
||||
|
||||
`-- AYANOVA_LOG_ENABLE_LOGGER_DIAGNOSTIC_LOG=true`
|
||||
|
||||
or set the environment variable
|
||||
|
||||
`AYANOVA_LOG_ENABLE_LOGGER_DIAGNOSTIC_LOG = true`
|
||||
|
||||
Warning: this diagnostic log should be disabled as soon as it's not required. Unlike the normal log, this log file is not automatically trimmed so it will grow in size forever.
|
||||
BIN
docs/8.0/ayanova/docs/img/ayanovaicon48.png
Normal file
|
After Width: | Height: | Size: 2.6 KiB |
BIN
docs/8.0/ayanova/docs/img/ayanovaicon60x60.png
Normal file
|
After Width: | Height: | Size: 3.0 KiB |
BIN
docs/8.0/ayanova/docs/img/dbdump.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
BIN
docs/8.0/ayanova/docs/img/favicon.ico
Normal file
|
After Width: | Height: | Size: 5.3 KiB |
BIN
docs/8.0/ayanova/docs/img/v8ServerMetricsDashboard.png
Normal file
|
After Width: | Height: | Size: 675 KiB |
BIN
docs/8.0/ayanova/docs/img/v8ServerMetricsSnapshotText.png
Normal file
|
After Width: | Height: | Size: 38 KiB |
23
docs/8.0/ayanova/docs/index.md
Normal file
@@ -0,0 +1,23 @@
|
||||
# WELCOME TO AYANOVA 
|
||||
|
||||
## About this documentation
|
||||
|
||||
This manual has the following sections:
|
||||
|
||||
- **User**
|
||||
|
||||
User manual and guide to AyaNova features
|
||||
|
||||
- **Operations**
|
||||
|
||||
Technical guide for installation and ongoing maintenance operations of AyaNova
|
||||
|
||||
- **Developer**
|
||||
|
||||
Guide for software developers to use the AyaNova REST interface
|
||||
|
||||
## Beyond this manual
|
||||
|
||||
If you have a question that is not answered in this manual contact AyaNova support directly: [support@ayanova.com](mailto:support@ayanova.com)
|
||||
|
||||
Or check out our support forum [https://forum.ayanova.com/](https://forum.ayanova.com/)
|
||||
9
docs/8.0/ayanova/docs/intro.md
Normal file
@@ -0,0 +1,9 @@
|
||||
# INTRODUCTION
|
||||
|
||||
How to use this help manual
|
||||
|
||||
## Searching
|
||||
|
||||
- one
|
||||
- two
|
||||
- three
|
||||
33
docs/8.0/ayanova/docs/ops-config-db.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# DATABASE
|
||||
|
||||
AyaNova uses [PostgreSQL](https://www.postgresql.org/) as its database server in all configurations, no other database is supported.
|
||||
|
||||
## Default connection string
|
||||
|
||||
If no connection string is specified AyaNova will use a default value: "Server=localhost;".
|
||||
|
||||
## Setting the connection string
|
||||
|
||||
AyaNova expects the connection string to be provided by an environment variable or command line parameter named:
|
||||
|
||||
`AYANOVA_DB_CONNECTION`
|
||||
|
||||
Example command line parameter:
|
||||
|
||||
`dotnet run --AYANOVA_DB_CONNECTION="Server=localhost;Database=MyAyaNovaDB;"`
|
||||
|
||||
Example environment variable:
|
||||
|
||||
Windows:
|
||||
|
||||
`set "AYANOVA_DB_CONNECTION=Server=localhost;Database=MyAyaNovaDB;"`
|
||||
|
||||
Linux:
|
||||
|
||||
`export AYANOVA_DB_CONNECTION="Server=localhost;Database=MyAyaNovaDB;"`
|
||||
|
||||
If both a command line parameter and an environment variable are set the command line parameter takes precedence.
|
||||
|
||||
## Default database
|
||||
|
||||
If no default database is specified AyaNova will use the default value: "AyaNova".
|
||||
56
docs/8.0/ayanova/docs/ops-config-default-language.md
Normal file
@@ -0,0 +1,56 @@
|
||||
# DEFAULT LANGUAGE / LOCALE SETTING
|
||||
|
||||
This setting controls the default language for text displayed to users in the AyaNova user interface.
|
||||
|
||||
Users can choose to override this setting in their user account by choosing another language.
|
||||
|
||||
It will also be used for some messages that originate at the server and are not associated with a particular user where applicable.
|
||||
|
||||
## Default
|
||||
|
||||
If no language is specified or AyaNova can't find the language specified in the database then AyaNova defaults to English locale "en".
|
||||
|
||||
## Built in language values
|
||||
|
||||
In addition to user defined or customized languages, AyaNova comes with 4 "stock" languages built in and accepts a range of values for selecting the stock language.
|
||||
You can use the ISO two letter country code, the English name of the language, or that language's own name for the language.
|
||||
|
||||
Valid settings:
|
||||
|
||||
| LANGUAGE | VALID SETTINGS |
|
||||
| ----- | ------------------------------ |
|
||||
| English | "en", "English" |
|
||||
| French | "fr", "French", "Français" |
|
||||
| German | "de", "German", "Deutsch" |
|
||||
| Spanish | "es", "Spanish", "Español" |
|
||||
|
||||
## Custom language values
|
||||
|
||||
AyaNova allows for customized languages and this setting should be the exact name of a custom locale that exists within AyaNova if not using a built in language.
|
||||
|
||||
## Setting
|
||||
|
||||
AyaNova expects the language setting to be provided by an environment variable or command line parameter named
|
||||
|
||||
`AYANOVA_DEFAULT_LANGUAGE`
|
||||
|
||||
The value specified should be a string containing one of the stock valid settings in the table above or the name of a custom locale, for example:
|
||||
`French`
|
||||
or
|
||||
`AcmeWidgetsCustomLocale`
|
||||
|
||||
Example command line parameter
|
||||
|
||||
`dotnet run --AYANOVA_DEFAULT_LANGUAGE="ES"`
|
||||
|
||||
Example environment variable
|
||||
|
||||
Windows
|
||||
|
||||
`set "AYANOVA_DEFAULT_LANGUAGE=DE"`
|
||||
|
||||
Linux / MAC
|
||||
|
||||
`export AYANOVA_DEFAULT_LANGUAGE="MyCustomLocale"`
|
||||
|
||||
If both a command line parameter and an environment variable are set the command line parameter takes precedence.
|
||||
41
docs/8.0/ayanova/docs/ops-config-environment-variables.md
Normal file
@@ -0,0 +1,41 @@
|
||||
# ENVIRONMENT VARIABLES / COMMAND LINE ARGUMENTS LIST
|
||||
|
||||
Most of the AyaNova configuration is stored inside the database, however anything related to starting the server can not be stored in the database and so environment variables or command line parameters are used to control server start up settings.
|
||||
|
||||
These values can all be specified as an environment variable or as a command line parameter. In cases where both are specified, the command line parameter takes precedence.
|
||||
|
||||
## DATABASE
|
||||
|
||||
- [AYANOVA_DB_CONNECTION](ops-config-db.md)
|
||||
|
||||
## FILE STORAGE LOCATIONS
|
||||
|
||||
- [AYANOVA_FOLDER_BACKUP_FILES](ops-config-folder-backup-files.md)
|
||||
- [AYANOVA_FOLDER_USER_FILES](ops-config-folder-user-files.md)
|
||||
|
||||
## LOGGING
|
||||
|
||||
- [AYANOVA_LOG_ENABLE_LOGGER_DIAGNOSTIC_LOG](common-log.md#troubleshooting-logging)
|
||||
- [AYANOVA_LOG_LEVEL](common-log.md#log-level)
|
||||
- [AYANOVA_LOG_PATH](common-log.md#log-path)
|
||||
|
||||
## LANGUAGE / LOCALE
|
||||
|
||||
- [AYANOVA_DEFAULT_LANGUAGE](ops-config-default-language.md)
|
||||
|
||||
## API
|
||||
|
||||
- [AYANOVA_USE_URLS](ops-config-use-urls.md)
|
||||
- [AYANOVA_FOLDER_USER_FILES](ops-config-folder-user-files.md)
|
||||
- [AYANOVA_FOLDER_BACKUP_FILES](ops-config-folder-backup-files.md)
|
||||
|
||||
## METRICS
|
||||
|
||||
- [AYANOVA_METRICS_USE_INFLUXDB](ops-metrics.md)
|
||||
- [AYANOVA_METRICS_INFLUXDB_BASEURL](ops-metrics.md)
|
||||
- [AYANOVA_METRICS_INFLUXDB_DBNAME](ops-metrics.md)
|
||||
- [AYANOVA_METRICS_INFLUXDB_CONSISTENCY](ops-metrics.md)
|
||||
- [AYANOVA_METRICS_INFLUXDB_USERNAME](ops-metrics.md)
|
||||
- [AYANOVA_METRICS_INFLUXDB_PASSWORD](ops-metrics.md)
|
||||
- [AYANOVA_METRICS_INFLUXDB_RETENSION_POLICY](ops-metrics.md)
|
||||
- [AYANOVA_METRICS_INFLUXDB_CREATE_DATABASE_IF_NOT_EXISTS](ops-metrics.md)
|
||||
35
docs/8.0/ayanova/docs/ops-config-folder-backup-files.md
Normal file
@@ -0,0 +1,35 @@
|
||||
# BACKUP FILES FOLDER SETTING
|
||||
|
||||
This setting controls where AyaNova stores backup and restore files used by the backup and restore features built into AyaNova.
|
||||
In addition this folder is used when importing from an AyaNova 7 export file.
|
||||
|
||||
Warning: this folder MUST NOT be the same location set for [AYANOVA_FOLDER_USER_FILES](ops-config-folder-user-files.md) or AyaNova will not start.
|
||||
|
||||
## Default
|
||||
|
||||
If no override is specified AyaNova will store backup files in a `backupfiles` folder in the AyaNova root folder where AyaNova is started from.
|
||||
|
||||
## Overriding
|
||||
|
||||
AyaNova expects the backup files folder path to be provided by an environment variable or command line parameter named
|
||||
|
||||
`AYANOVA_FOLDER_BACKUP_FILES`
|
||||
|
||||
The value specified should be a string containing a fully qualified file path for the platform, for example:
|
||||
`c:\data\ayanova\backupfiles`
|
||||
|
||||
Example command line parameter
|
||||
|
||||
`dotnet run --AYANOVA_FOLDER_BACKUP_FILES="/var/lib/ayanova/backupfiles"`
|
||||
|
||||
Example environment variable
|
||||
|
||||
Windows
|
||||
|
||||
`set "AYANOVA_FOLDER_BACKUP_FILES=c:\data\ayanova\backupfiles"`
|
||||
|
||||
Linux / MAC
|
||||
|
||||
`export AYANOVA_FOLDER_BACKUP_FILES="/var/lib/ayanova/backupfiles"`
|
||||
|
||||
If both a command line parameter and an environment variable are set the command line parameter takes precedence.
|
||||
35
docs/8.0/ayanova/docs/ops-config-folder-user-files.md
Normal file
@@ -0,0 +1,35 @@
|
||||
# USER FILES FOLDER SETTING
|
||||
|
||||
This setting controls where AyaNova stores user uploaded files used by features that allow file attachment or uploads.
|
||||
AyaNova stores these files with random names in the folder specified.
|
||||
|
||||
Warning: this folder MUST NOT be the same location set for [AYANOVA_FOLDER_BACKUP_FILES](ops-config-folder-backup-files.md) or AyaNova will not start.
|
||||
|
||||
## Default
|
||||
|
||||
If no override is specified AyaNova will store user files in a `userfiles` folder in the AyaNova root folder where AyaNova is started from.
|
||||
|
||||
## Overriding
|
||||
|
||||
AyaNova expects the user files folder path to be provided by an environment variable or command line parameter named
|
||||
|
||||
`AYANOVA_FOLDER_USER_FILES`
|
||||
|
||||
The value specified should be a string containing a fully qualified file path for the platform, for example:
|
||||
`c:\data\ayanova\userfiles`
|
||||
|
||||
Example command line parameter
|
||||
|
||||
`dotnet run --AYANOVA_FOLDER_USER_FILES="/var/lib/ayanova/userfiles"`
|
||||
|
||||
Example environment variable
|
||||
|
||||
Windows
|
||||
|
||||
`set "AYANOVA_FOLDER_USER_FILES=c:\data\ayanova\userfiles"`
|
||||
|
||||
Linux / MAC
|
||||
|
||||
`export AYANOVA_FOLDER_USER_FILES="/var/lib/ayanova/userfiles"`
|
||||
|
||||
If both a command line parameter and an environment variable are set the command line parameter takes precedence.
|
||||
36
docs/8.0/ayanova/docs/ops-config-use-urls.md
Normal file
@@ -0,0 +1,36 @@
|
||||
# PORT / URL SETTING
|
||||
|
||||
You can control the port and URL that the AyaNova server will listen on via environment variable or command line parameter.
|
||||
|
||||
## Default
|
||||
|
||||
If no override is specified AyaNova will use the following default value:
|
||||
|
||||
`"http://*:7575"`
|
||||
|
||||
This means AyaNova will listen on port 7575
|
||||
|
||||
## Overriding
|
||||
|
||||
AyaNova expects the PORT and URL to be provided by an environment variable or command line parameter named
|
||||
|
||||
`AYANOVA_USE_URLS`
|
||||
|
||||
The value specified should be a string of one or more semicolon separated values, for example:
|
||||
`http://*:5000;http://localhost:5001;https://hostname:5002`
|
||||
|
||||
Example command line parameter
|
||||
|
||||
`dotnet run --AYANOVA_USE_URLS="http://*:5000"`
|
||||
|
||||
Example environment variable
|
||||
|
||||
Windows
|
||||
|
||||
`set "AYANOVA_USE_URLS=http://*:5000"`
|
||||
|
||||
Linux / MAC
|
||||
|
||||
`export AYANOVA_USE_URLS="http://*:5000"`
|
||||
|
||||
If both a command line parameter and an environment variable are set the command line parameter takes precedence.
|
||||
28
docs/8.0/ayanova/docs/ops-error-codes.md
Normal file
@@ -0,0 +1,28 @@
|
||||
# SERVER ERROR CODES
|
||||
|
||||
AyaNova will provide a server error code when an error arises.
|
||||
All AyaNova server error codes start with the letter E followed by a number in the range 1000-1999.
|
||||
|
||||
Server error codes are different from [API error codes](api-error-codes.md) which are intended for software and developers using the AyaNova developers API.
|
||||
|
||||
The purpose of these server error codes is to make it easier to look them up in this manual and easily communicate errors to technical support if necessary.
|
||||
|
||||
In most cases where an error occurs there will be more detailed information about the error in the [log file](common-log.md).
|
||||
|
||||
Here are all the error codes that can be returned by the AyaNova server:
|
||||
|
||||
| CODE | MEANING |
|
||||
| ----- | ------------------------------ |
|
||||
| E1000 | Could not connect to the database specified in the [connection string](ops-config-db.md). |
|
||||
| E1010 | Could not find wwwRoot folder. AyaNova must be started from the folder immediately above wwwRoot. Generally the start folder should be the same folder as AyaNova.dll file. |
|
||||
| E1012 | Missing resource folder. AyaNova was started from the wrong location or the resource folder was not installed properly. This is required to initialize a new AyaNova database |
|
||||
| E1013 | Missing language resource file. A language resource file was deleted, renamed, or not installed correctly. Resource language files are required to load into a new AyaNova database to display text in several languages for the user interface |
|
||||
| E1015 | Missing language. One or more of the stock languages were not found in the database or a custom language specified in the config setting [AYANOVA_DEFAULT_LANGUAGE](ops-config-default-language.md) is missing from the database. Log will have details. |
|
||||
| E1020 | Licensing related error. The message will contain the explanation |
|
||||
| E1030 | AyaNova database failed an integrity check. Contact support immediately. |
|
||||
| E1040 | File location [environment variables](ops-config-environment-variables.md) for backup files and user files were found to be the same location and must not be |
|
||||
| E1050 | XXXXXXXX |
|
||||
| E1060 | XXXXXXXX |
|
||||
| E1070 | XXXXXXXX |
|
||||
| E1080 | XXXXXXXX |
|
||||
| E1090 | AyaNova failed to start due to an unexpected error during boot. |
|
||||