Skip to content

Commit 34a43e0

Browse files
committed
archiver-appliance: add sitespecific customization
This removes the big old "Testing!" in the webpage's main banner
1 parent b785c3d commit 34a43e0

File tree

5 files changed

+227
-1
lines changed

5 files changed

+227
-1
lines changed

pkgs/epnix/tools/archiver-appliance/default.nix

+10-1
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
tomcat9,
1111
python3Packages,
1212
python3,
13+
sitespecific ? ./sitespecific/epnix,
1314
}:
1415
stdenvNoCC.mkDerivation (finalAttrs: {
1516
pname = "archiver-appliance";
@@ -55,7 +56,15 @@ stdenvNoCC.mkDerivation (finalAttrs: {
5556
# Some PV tests fail
5657
#doCheck = true;
5758

58-
TOMCAT_HOME = "${tomcat9}";
59+
env = {
60+
ARCHAPPL_SITEID = "epnix";
61+
TOMCAT_HOME = "${tomcat9}";
62+
};
63+
64+
postPatch = ''
65+
echo "Copying sitespecific directory"
66+
cp -rT ${sitespecific} src/sitespecific/epnix
67+
'';
5968

6069
installPhase = ''
6170
runHook preInstall
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
<?xml version="1.0" encoding="UTF-8"?>
2+
<project name="epnix" default="updateui">
3+
<property name="site" value="EPNix"/>
4+
<target name="echo">
5+
<echo message="From within the site specific build for ${archapplsite}."/>
6+
<echo message="${site} SITE ID : ${archapplsite}" />
7+
<echo message="${site} : STAGE_FOLDER - ${stage}" />
8+
<echo message="${site} : Classes folder - ${classes}" />
9+
</target>
10+
<target name="updateui" depends="updateTemplate">
11+
<echo message="${site} : ${site} image files to STAGE_FOLDER/org/epics/archiverappliance/staticcontent/img" />
12+
<copy todir="${stage}/org/epics/archiverappliance/staticcontent/img" overwrite="true">
13+
<fileset dir="img" />
14+
</copy>
15+
</target>
16+
<target name="updateTemplate" depends="echo">
17+
<echo message="${site} : Apply template_changes.html to mgmt to STAGE_FOLDER/org/epics/archiverappliance/staticcontent" />
18+
<java classname = "org.epics.archiverappliance.mgmt.bpl.SyncStaticContentHeadersFooters"
19+
fork="true"
20+
failonerror="true" >
21+
<classpath path="${classes}" />
22+
<arg value="template_changes.html" />
23+
<arg value="${stage}/org/epics/archiverappliance/mgmt/staticcontent/"/>
24+
</java>
25+
</target>
26+
</project>
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
1+
# Specify arbitrary name value pairs here; names and values are strings...
2+
3+
# The PlainPB plugin replaces the characters specified in this list with the "/" character to generate path names.
4+
# This has the effect of distributing the files containing data into multiple folders
5+
# Getting a good distribution is important for good performance; most file systems do not perform well if they have 10000's of files in a folder.
6+
# This is passed into java's String.replaceAll; so watch out for regex issues.
7+
org.epics.archiverappliance.config.ConvertPVNameToKey.siteNameSpaceSeparators = [\\:\\-]
8+
# To avoid substring issues, each pathname is terminated with a character that is guaranteed not to be in the path name (because of said replacement above)
9+
org.epics.archiverappliance.config.ConvertPVNameToKey.siteNameSpaceTerminator = :
10+
11+
# We enforce a site specific minimum sampling period (read maximum rate) using this value
12+
org.epics.archiverappliance.mgmt.bpl.ArchivePVAction.minimumSamplingPeriod = 0.1
13+
14+
# We have a system wide buffer size (specified in seconds) for buffering the data in engine memory
15+
# This is a compromise between various factors including garbage collection, IOPS of the short term store and memory availability.
16+
org.epics.archiverappliance.config.PVTypeInfo.secondsToBuffer = 2
17+
18+
# Per FRIB/PSI, we have a configuration knob to increase/decrease the sample buffer size used by the engine for all PV's.
19+
# This is a double - by default 1.0 which means we leave the buffer size computation as is.
20+
# If you want to increase buffer size globally to 150% of what is normally computed, set this to 1.5
21+
org.epics.archiverappliance.config.PVTypeInfo.sampleBufferCapacityAdjustment = 1.0
22+
23+
# The getData retrieval calls support a usereduced parameter which is the entry into sparsification.
24+
# It is possible for sites to configure their default sparsification post processor using this parameter.
25+
# For example, at SLAC we will probably divide the request into two parts: data less than two weeks old is raw data, while data older than two weeks is sparsified using FirstSamplePP
26+
org.epics.archiverappliance.retrieval.DefaultUseReducedPostProcessor=org.epics.archiverappliance.retrieval.postprocessors.TwoWeekRaw
27+
28+
# If a PV used to be archived by the appliance but has since been retired, data does not get returned from the request even though data might exist in the
29+
# datastores. Setting this option to 'true' means that if a request is received for a PV that is not currently being archived (i.e. the PVTypeInfo is null),
30+
# it will automatically then run a search in the datastores to see if there is any data available for this PV within the timeframe and return it.
31+
# org.epics.archiverappliance.retrieval.SearchStoreForRetiredPvs=true
32+
33+
# This property has been deprecated as it can easily lead to data loss.
34+
# The maximum number of datastores/stages/lifetimeids in this installation.
35+
# Specifically, this returns the maximum length of the datastores element across all PVTypeInfo's in this installation.
36+
# For example, in a typical installation with a short term store, a medium term store and a long term store, this should return 3.
37+
# Various optimizations are based on the maximum number of stages data goes thru in the archiver appliance.
38+
# We create one ETL thread per lifetime transition
39+
# org.epics.archiverappliance.config.PVTypeInfo.maximumNumberOfLifetimesInInstallation=5
40+
41+
# Use this property to control whether you want to use CAJ or the JNI implementation in JCA.
42+
org.epics.archiverappliance.engine.epics.JCAConfigGen.useCAJ=true
43+
# This controls the dispatcher used to dispatch ChannelAccess events.
44+
org.epics.archiverappliance.engine.epics.JCAConfigGen.dispatcher=org.epics.archiverappliance.engine.epics.JCAEventDispatcherBasedOnPVName
45+
#org.epics.archiverappliance.engine.epics.JCAConfigGen.dispatcher=gov.aps.jca.event.QueuedEventDispatcher
46+
# For faster reconnect times, we may want to use more than one JCAContext/CAJContext. This controls the number of JCACommandThreads and thus the number of JCAContext/CAJContext.
47+
# Each JCACommandThread launches approximately 4 threads in all in CAJ - one CAJ search thread (UDP); a couple of TCP threads and the JCACommand thread that controls them.
48+
# Routing all PVs thru fewer contexts seems to result in larger reconnect times.
49+
org.epics.archiverappliance.engine.epics.commandThreadCount=10
50+
51+
# Maximum amount of clock drift ( in seconds ) between appliance and IOC.
52+
# Samples more than this many seconds in the future are discarded for data quality reasons.
53+
# Samples from the second sample onwards (after a PV connect) more than this seconds in the past are discarded for data quality reasons.
54+
# org.epics.archiverappliance.engine.epics.server_ioc_drift_seconds=1800
55+
56+
# The SCAN sampling method establishes camonitors and skips samples that are less than the sampling method.
57+
# However, the IOC itself could have some jitter and this will cause the SCAN'ed PV to miss samples that arrive a few milliseconds early.
58+
# Use this to control how much jitter you want to accommodate.
59+
# This is a multiplier; so if your sampling method is 1.0 seconds and the jitter factor is 0.95, then the final sampling method is 950 milliseconds.
60+
# Alternatively, to establish a jitter of 5%, use 0.95; to establish a jitter of 10%, use 0.9 etc...
61+
org.epics.archiverappliance.engine.epics.scanJitterFactor=0.95
62+
63+
# We use a ScheduledThreadPoolExecutor for implementing SCAN PV's.
64+
# If you have a lot of PV's under SCAN and some of these take time; it is possible to miss some SCAN samples because we just don't get to the PV in time.
65+
# In this case, you can increase the number of SCAN threads used; the need for this is probably pretty rare
66+
org.epics.archiverappliance.engine.epics.scanThreadCount=1
67+
68+
# How should ETL handle out of space situations.
69+
# See the javadoc of org.epics.archiverappliance.etl.common.OutOfSpaceHandling for some options
70+
org.epics.archiverappliance.etl.common.OutOfSpaceHandling=DELETE_SRC_STREAMS_IF_FIRST_DEST_WHEN_OUT_OF_SPACE
71+
72+
# A list of fields for PVs that are monitored and maintained in the engine.
73+
# These are used when displaying the PV in visualization tools like the ArchiveViewer as additional information for the PV.
74+
# Some of these could be archived along with the PV but need not be.
75+
# In this case, the engine simply maintains the latest copy in memory and this is served up when data from the engine in included in the stream.
76+
# This is a comma separated list of fields
77+
# For example, if you have a field for the owner of a PV, you could add that here.
78+
# We add the DESC in addition to the typical limits.
79+
org.epics.archiverappliance.config.RuntimeKeys=DESC
80+
81+
# On CA disconnects, occasionally, we do not reconnect back to the PV in time.
82+
# This timeout governs the delay between the disconnect and when we do a pause/resume to convert reconnects into ca searches.
83+
# If you want to turn off this functionality, simply set this value to 0
84+
org.epics.archiverappliance.engine.util.EngineContext.disconnectCheckTimeoutInMinutes = 0
85+
86+
# Configure how fast the engine starts up PVs
87+
# To prevent broadcast storms, we pause for pausePerGroup seconds for every pausePerGroup PVs
88+
# org.epics.archiverappliance.engine.archivePVSonStartup.pausePerGroupPVCount = 2000
89+
# org.epics.archiverappliance.engine.archivePVSonStartup.pausePerGroupPauseTimeInSeconds = 2
90+
91+
# This is a more complex one.
92+
# Ideally, we'll look in the datastores for the last known sample and use that as a boundary condition for future samples.
93+
# But this can be expensive in systems with large numbers of PV's and can delay startup of PV's significantly.
94+
# In these cases, we sacrifice the samples of PV's with ancient timestamps and only record samples from "now on".
95+
# But this may be a more suitable tradeoff for such installations.
96+
# If you set this to false, the boundary condition is the server's current timestamp when the PV is being started.
97+
# org.epics.archiverappliance.engine.archivePVSonStartup.determineLastKnownEventFromStores = true
98+
99+
# One can define a set of named flags (booleans) that can be used to control various processes in the system
100+
# For example, you can control the ETL process in a PlainPBStoragePlugin using a named flag to accomplish a gated ETL.
101+
# Named flags are not persistent; each time the server starts up, all the named flags are set to false
102+
# If a named flag is not defined, it defaults to false.
103+
# You can optionally load values for named flags by specifying the full path to a java properties file here.
104+
# org.epics.archiverappliance.config.NamedFlags.readFromFile=/nfs/fullpathto/namedFlags.properties
105+
106+
# We pick up named client configuration JSON files for the archive viewer from here.
107+
# To turn off this features, simply comment this property.
108+
#org.epics.archiverappliance.retrieval.bpl.GetClientConfiguration.DocumentRoot=/nfs/slac/g/lcls/tools/ArchiveBrowser/config
109+
110+
# We impose a throttle on archive requests that are pending. This is an attempt to conserve resources on the engine and also to control CA search broadcast storms
111+
# What this means is that if you have more than this many invalid PV's (PVs that will never connect) in the archive workflow; the ones later in the queue will never get fulfilled unless the archive request queue is cleaned up.
112+
# See abortNeverConnectedPV for an example of how to do this.
113+
# Use this property to increase this batch size.
114+
# Note that there is a limit on how high this can go. We sample the PV for a minute to determine storage rates etc; if this is set very high (> 10000), there may not be enough time to perform the sampling for the archive workflow.
115+
# org.epics.archiverappliance.mgmt.MgmtRuntimeState.archivePVWorkflowBatchSize = 1000
116+
117+
# For larger archivePVWorkflowBatchSize, you may need a longer time between the workflow state machine ticks.
118+
# Set this to 60 seconds or more if you are using archivePVWorkflowBatchSize's of 5000 or greater.
119+
# The workflow may take a little longer per PV but overall may be much faster.
120+
# org.epics.archiverappliance.mgmt.MgmtRuntimeState.archivePVWorkflowTickSeconds = 10
121+
122+
# Abort PV's in the archive PV workflow after this many minutes if the archiver is not able to connect to the PV.
123+
# The workflow can take a few minutes; so this should be set to a reasonable value (for example, 1 minute would mean that no PV would complete the workflow)
124+
# By default, this is set to a day. So, if the archiver cannot connect to the PV in a day, it will give up and abort.
125+
# To turn this off, set this to -1.
126+
# org.epics.archiverappliance.mgmt.MgmtRuntimeState.abortArchiveRequestInMins = 1440
127+
128+
# The initial delay after startup before starting processing of archiving requests in seconds
129+
# As we now wait for the entire cluster to load up before starting the archive workflows, set this value if you need to delay the start of archiving workflows for other reasons.
130+
# org.epics.archiverappliance.mgmt.MgmtRuntimeState.initialDelayBeforeStartingArchiveRequests = 10
131+
132+
# See org.epics.archiverappliance.engine.pv.EngineContext for these two entries
133+
# org.epics.archiverappliance.engine.maximum_disconnected_channel_percentage_before_starting_metachannels = 5.0
134+
# org.epics.archiverappliance.engine.metachannels_to_start_at_a_time = 10000
135+
136+
# Choose whether to use pvAccess or Channel Access by default
137+
# Options are CA for Channel Access or PVA for pvAccess
138+
# Default is CA
139+
# org.epics.archiverappliance.mgmt.bpl.ArchivePVAction.defaultAccessProtocol = PVA
140+
141+
# For single PV requests, use a HTTP redirect to the appliance containing the PV instead of proxying
142+
# This may be faster in some situations.
143+
# However, this may not work in cases where the appliances are accessed behind a load balancer.
144+
org.epics.archiverappliance.retrieval.DataRetrievalServlet.proxyRetrievalRequest=false

0 commit comments

Comments
 (0)