# derecho-sample.cfg
[DERECHO]
# contact ip - the active leader's ip address
contact_ip = 127.0.0.1
# contact port - the active leader's gms port
contact_port = 23580
# list of leaders to contact during a restart in priority order
restart_leaders = 127.0.0.1,127.0.0.1
# list of GMS ports of the restart leaders, in the same order
restart_leader_ports = 23580,23581
# derecho gms port
gms_port = 23580
# derecho state-transfer port
state_transfer_port = 28366
# sst tcp port
sst_port = 37683
# rdmc tcp port
rdmc_port = 31675
# external tcp port, listening for external clients
external_port = 32645
# Maximum possible node ID value
# Node IDs are 32-bit integers, but all Derecho systems will have
# many fewer nodes than this. Derecho will pre-allocate space for a
# P2P connection for each possible node ID, each of which is about
# 48 bytes, so keeping the maximum node ID value as low as possible
# saves memory.
max_node_id = 1024
# When the system is idle, the p2p event loop goes into 'napping' mode, in which it periodically sleeps for a short
# time between checks for incoming messages. Before getting into 'napping' mode, it busy-waits for
# 'p2p_loop_busy_wait_before_sleep_ms' milliseconds. The default value is 250 ms. Pick a value that balances CPU
# utilization against application latency.
p2p_loop_busy_wait_before_sleep_ms = 250
# This is the interval, in milliseconds, of the failure detector thread for MulticastGroup and P2PConnectionManager.
# It is best to leave this at 1 ms for RDMA. If it is too high,
# you run the risk of overflowing the queue of outstanding sends.
heartbeat_ms = 1
# sst poll completion queue timeout in milliseconds
sst_poll_cq_timeout_ms = 100
# This is the maximum time a restart leader will wait for other nodes to restart
# before proceeding with the restart if it has a quorum; it's a "grace period"
# that allows more nodes to be included in the restart quorum at the cost of
# taking longer to restart.
restart_timeout_ms = 2000
# This setting controls the experimental "backup restart leaders" feature. If
# false, only the first leader in the restart_leaders list will be contacted
# during a restart (the rest are ignored), and the group will fail to restart
# if this leader crashes. If true (enabled), restarting nodes will try
# contacting the backup leaders in order once they detect that the first restart
# leader has failed. The default is false since failure detection during restart
# is unreliable and may cause a slow restart leader to be treated as failed.
enable_backup_restart_leaders = false
# disable partitioning safety
# Disabling this safety feature allows Derecho to run even when the active
# members cannot form a majority. Be aware of the 'split-brain'
# syndrome (https://en.wikipedia.org/wiki/Split-brain) and make sure your
# application can tolerate it.
# To make it easier to experiment with Derecho at first, partitioning safety
# is disabled here. We suggest setting disable_partitioning_safety to false
# for serious deployments.
disable_partitioning_safety = true
# maximum payload size for P2P requests
max_p2p_request_payload_size = 10240
# maximum payload size for P2P replies
max_p2p_reply_payload_size = 10240
# window size for P2P requests and replies
p2p_window_size = 16
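#
# For reference, the values in this section can be read from application code
# through Derecho's configuration API. The sketch below is illustrative only;
# the getter names are assumptions based on <derecho/conf/conf.hpp>, so check
# your Derecho version for the exact API:
#
#   #include <derecho/conf/conf.hpp>
#
#   // Read two of the values configured above, keyed as "SECTION/option".
#   const uint16_t gms_port = derecho::getConfUInt16("DERECHO/gms_port");
#   const uint32_t p2p_window = derecho::getConfUInt32("DERECHO/p2p_window_size");
#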
# Subgroup configurations
# - The default subgroup settings
[SUBGROUP/DEFAULT]
# maximum payload size
# Any message larger than this size has to be broken
# into multiple messages.
# Large messages consume memory because the message buffers
# have to be pre-allocated.
max_payload_size = 10240
# maximum reply payload size
# This is for replies generated by ordered sends in the subgroup
max_reply_payload_size = 10240
# maximum smc (SST's small message multicast) payload size
# If the message size is smaller than or equal to this size,
# the message is sent using SST multicast; otherwise Derecho
# tries RDMC, provided the message size does not exceed
# max_payload_size (see the sketch at the end of this section).
max_smc_payload_size = 10240
# block size depends on your max_payload_size.
# It is only relevant if you ever send a message using RDMC.
# In that case, it should be set to the same value as max_payload_size
# if max_payload_size is around 1 MB. For very large messages, the
# block size should be a few MBs (1 is fine).
block_size = 1048576
# message window size
# the length of the message pipeline
window_size = 16
# the send algorithm for RDMC. Other options are
# chain_send, sequential_send, tree_send
rdmc_send_algorithm = binomial_send
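#
# To summarize how the three size knobs above interact, here is a hedged
# C++-style sketch of the send-path choice; this is illustrative pseudocode
# restating the comments above, not Derecho's actual source:
#
#   // message_size: payload size of one multicast message, in bytes
#   if (message_size <= max_smc_payload_size) {
#       // delivered via SST's small message multicast (SMC)
#   } else if (message_size <= max_payload_size) {
#       // delivered via RDMC, transferred in blocks of block_size bytes
#   } else {
#       // too large for a single message: the sender must break it
#       // into multiple messages
#   }
#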
# - SAMPLE for large message settings
[SUBGROUP/LARGE]
max_payload_size = 102400
max_reply_payload_size = 102400
max_smc_payload_size = 10240
block_size = 10240
window_size = 3
rdmc_send_algorithm = binomial_send
# - SAMPLE for small message settings
[SUBGROUP/SMALL]
max_payload_size = 100
max_reply_payload_size = 100
max_smc_payload_size = 100
# TODO: avoid creating an rdmc group unless max_payload_size > max_smc_payload_size
block_size = 1024
window_size = 50
rdmc_send_algorithm = binomial_send
# Persistent configurations
[PERS]
# directory for the file system-based persistent logs
file_path = .plog
ramdisk_path = /dev/shm/volatile_t
# Reset persistent data
# CAUTION: "reset = true" removes existing persisted data!!!
reset = false
# Maximum number of log entries in each persistent<T>; defaults to 1048576
max_log_entry = 1048576
# Maximum data size in bytes for each persistent<T>; defaults to 512 GB
max_data_size = 549755813888
# Path to the file storing this node's private key for digital signatures.
# The file must be in PEM format, and must not have a password associated with it.
# If no persistent objects in the Derecho group have signatures enabled, this
# file need not exist (it will not be used if there are no signatures).
private_key_file = private_key.pem
# Logger configurations
[LOGGER]
# Default log name. This determines the file name of log files.
default_log_name = derecho_debug
# default log level
# Available options:
# trace, debug, info, warning, error, critical, off
default_log_level = info
# RPC module log level. Defaults to default_log_level if not set.
rpc_log_level = info
# SST module log level. Defaults to default_log_level if not set.
sst_log_level = info
# Persistence module log level. Defaults to default_log_level if not set.
persistence_log_level = info
# Whether logs should be printed to the terminal as well as saved to files (default is true)
log_to_terminal = true
# The number of older log files to save. Log files are rotated automatically
# when the current one reaches 1MB in size. Default is 3.
log_file_depth = 3
# optional layout configurations
[LAYOUT]
# In this section you can optionally specify the layout of the derecho group. Please note that you can also define the
# layout programmatically with the predefined SubgroupInfo objects and the DefaultSubgroupAllocator class, or define
# a SubgroupInfo object with customized view-generation code. If you choose to use this layout configuration, you
# MUST initialize the SubgroupInfo object using derecho::make_subgroup_allocator<>(). By default,
# derecho::make_subgroup_allocator<>() first tries the 'json_layout' string; if that fails, it then tries the
# 'json_layout_file' file. If no layout is found, an exception is thrown.
#
# The 'json_layout' string and the contents of 'json_layout_file' MUST use the format defined below.
# The layout configuration is a JSON array. Each element of the array is a dictionary specifying the layout of
# the subgroups of one subgroup type. The order of the elements corresponds to the order of the subgroup types in the
# derecho group definition/declaration. The array length MUST match the number of derecho subgroup types.
#
# Each array element has two entries. The 'type_alias' entry is a short name for the corresponding subgroup
# type, while the 'layout' entry is also an array, each element of which defines the layout of one subgroup of the
# corresponding subgroup type.
#
# A subgroup layout consists of five entries. Each entry is an array with one element per shard, in
# increasing order of shard index (0,1,2,...).
#
# 'min_nodes_by_shard' and 'max_nodes_by_shard' specify the minimum and maximum number of nodes in each shard.
#
# 'reserved_node_ids_by_shard' specifies a set of node ids reserved for each shard. In other words, if a new node joins
# with an id included in 'reserved_node_ids_by_shard' of shard S, it will be allocated to shard S. Please note that if
# a node id appears in 'reserved_node_ids_by_shard' of two subgroups, that node will be allocated to both subgroups,
# which naturally enables the long-desired overlapping-subgroup feature. The 'reserved_node_ids_by_shard' sets within
# the same subgroup must not overlap; otherwise a node would exist in two shards, violating the definition of
# sharding. Sometimes it is necessary to restrict which nodes in a shard are senders. Putting a '*' sign in front of a
# node id tells derecho that this node will be a sender.
#
# 'delivery_modes_by_shard' specifies the delivery mode of each shard. Only two delivery modes are supported: "Ordered"
# and "Raw".
#
# 'profiles_by_shard' specifies the profile sections ([SUBGROUP/<profile>]) that contain the communication parameters
# for each shard.
#
# json_layout = '
# [
# {
# "type_alias": "TestType1",
# "layout": [
# {
# "min_nodes_by_shard": ["2"],
# "max_nodes_by_shard": ["3"],
# "reserved_node_ids_by_shard": [["*1", "2", "3"]],
# "delivery_modes_by_shard": ["Ordered"],
# "profiles_by_shard": ["Default"]
# }
# ]
# },
# {
# "type_alias": "TestType2",
# "layout": [
# {
# "min_nodes_by_shard": ["2"],
# "max_nodes_by_shard": ["3"],
# "reserved_node_ids_by_shard": [["2", "3", "4"]],
# "delivery_modes_by_shard": ["Ordered"],
# "profiles_by_shard": ["Default"]
# }
# ]
# }
# ]'
# json_layout_file = json_cfgs/layout.json
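#
# A minimal C++ sketch of consuming this layout, under the assumption that
# the group is declared with two subgroup types matching the aliases in the
# example above (TestType1 and TestType2 are placeholder type names):
#
#   #include <derecho/core/derecho.hpp>
#
#   // make_subgroup_allocator<>() builds an allocation function that reads
#   // 'json_layout' (or 'json_layout_file') from this [LAYOUT] section.
#   derecho::SubgroupInfo subgroup_info(
#       derecho::make_subgroup_allocator<TestType1, TestType2>());
#
#   // subgroup_info is then passed to the derecho::Group<TestType1, TestType2>
#   // constructor, along with a factory for each subgroup type.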