Skip to content

Commit 4974ad0

Browse files
committed
HBASE-27834 Introduce ha-hdfs overlay
1 parent 50aa25d commit 4974ad0

12 files changed

Lines changed: 1174 additions & 0 deletions

File tree

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
# Licensed to the Apache Software Foundation (ASF) under one
2+
# or more contributor license agreements. See the NOTICE file
3+
# distributed with this work for additional information
4+
# regarding copyright ownership. The ASF licenses this file
5+
# to you under the Apache License, Version 2.0 (the
6+
# "License"); you may not use this file except in compliance
7+
# with the License. You may obtain a copy of the License at
8+
#
9+
# http://www.apache.org/licenses/LICENSE-2.0
10+
#
11+
# Unless required by applicable law or agreed to in writing, software
12+
# distributed under the License is distributed on an "AS IS" BASIS,
13+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14+
# See the License for the specific language governing permissions and
15+
# limitations under the License.
16+
---
17+
- op: replace
18+
# Using 'replace' here: 'add' appears to replace the value anyway.
19+
path: /spec/template/spec/affinity/podAntiAffinity
20+
value:
21+
requiredDuringSchedulingIgnoredDuringExecution:
22+
- labelSelector:
23+
matchLabels:
24+
role: datanode
25+
topologyKey: kubernetes.io/hostname
Lines changed: 325 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,325 @@
1+
<?xml version="1.0" encoding="UTF-8"?>
2+
<!--
3+
Licensed to the Apache Software Foundation (ASF) under one
4+
or more contributor license agreements. See the NOTICE file
5+
distributed with this work for additional information
6+
regarding copyright ownership. The ASF licenses this file
7+
to you under the Apache License, Version 2.0 (the
8+
"License"); you may not use this file except in compliance
9+
with the License. You may obtain a copy of the License at
10+
11+
http://www.apache.org/licenses/LICENSE-2.0
12+
13+
Unless required by applicable law or agreed to in writing, software
14+
distributed under the License is distributed on an "AS IS" BASIS,
15+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16+
See the License for the specific language governing permissions and
17+
limitations under the License.
18+
-->
19+
<configuration>
20+
<property>
21+
<!--https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsBlockPlacementPolicies.html-->
22+
<name>dfs.block.replicator.classname</name>
23+
<value>org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant</value>
24+
</property>
25+
<property>
26+
<name>dfs.blocksize</name>
27+
<value>64m</value>
28+
</property>
29+
<property>
30+
<name>dfs.datanode.address</name>
31+
<value>0.0.0.0:9866</value>
32+
</property>
33+
<property>
34+
<name>dfs.datanode.balance.bandwidthPerSec</name>
35+
<value>20m</value>
36+
</property>
37+
<property>
38+
<name>dfs.datanode.balance.max.concurrent.moves</name>
39+
<value>100</value>
40+
</property>
41+
<property>
42+
<name>dfs.datanode.data.dir</name>
43+
<value>${env.DATANODE_DATA_DIR}</value>
44+
</property>
45+
<property>
46+
<name>dfs.datanode.failed.volumes.tolerated</name>
47+
<value>0</value>
48+
</property>
49+
<property>
50+
<name>dfs.datanode.du.reserved</name>
51+
<value>1073741824</value>
52+
</property>
53+
<property>
54+
<name>dfs.datanode.fileio.profiling.sampling.percentage</name>
55+
<value>10</value>
56+
</property>
57+
<property>
58+
<name>dfs.datanode.http.address</name>
59+
<value>0.0.0.0:9864</value>
60+
</property>
61+
<property>
62+
<name>dfs.datanode.https.address</name>
63+
<value>0.0.0.0:9865</value>
64+
</property>
65+
<property>
66+
<name>dfs.datanode.ipc.address</name>
67+
<value>0.0.0.0:9867</value>
68+
</property>
69+
<property>
70+
<name>dfs.datanode.max.locked.memory</name>
71+
<value>0</value>
72+
</property>
73+
<property>
74+
<name>dfs.datanode.peer.stats.enabled</name>
75+
<value>true</value>
76+
</property>
77+
<property>
78+
<name>dfs.encrypt.data.transfer</name>
79+
<value>false</value>
80+
</property>
81+
<property>
82+
<name>dfs.encrypt.data.transfer.algorithm</name>
83+
<value>rc4</value>
84+
</property>
85+
<property>
86+
<name>dfs.ha.automatic-failover.enabled</name>
87+
<value>true</value>
88+
</property>
89+
<property>
90+
<name>dfs.ha.fencing.methods</name>
91+
<value>shell(/usr/bin/true)</value>
92+
</property>
93+
<property>
94+
<name>dfs.journalnode.edits.dir</name>
95+
<value>${env.JOURNALNODE_DATA_DIR}</value>
96+
</property>
97+
<property>
98+
<name>dfs.journalnode.http-address</name>
99+
<value>0.0.0.0:8480</value>
100+
</property>
101+
<property>
102+
<name>dfs.journalnode.https-address</name>
103+
<value>0.0.0.0:8481</value>
104+
</property>
105+
<property>
106+
<name>dfs.journalnode.rpc-address</name>
107+
<value>0.0.0.0:8485</value>
108+
</property>
109+
<property>
110+
<name>dfs.namenode.handler.count</name>
111+
<value>64</value>
112+
</property>
113+
<!--
114+
<property>
115+
<name>dfs.hosts</name>
116+
<value>/tmp/scratch/hosts.json</value>
117+
</property>
118+
<property>
119+
<name>dfs.namenode.hosts.provider.classname</name>
120+
<value>org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager</value>
121+
</property>
122+
-->
123+
<property>
124+
<name>dfs.namenode.http-bind-host</name>
125+
<value>0.0.0.0</value>
126+
</property>
127+
<property>
128+
<name>dfs.namenode.https-bind-host</name>
129+
<value>0.0.0.0</value>
130+
</property>
131+
<property>
132+
<name>dfs.namenode.name.dir</name>
133+
<value>${env.NAMENODE_DATA_DIR}</value>
134+
</property>
135+
<property>
136+
<name>dfs.namenode.replication.max-streams</name>
137+
<value>20</value>
138+
</property>
139+
<property>
140+
<name>dfs.namenode.replication.max-streams-hard-limit</name>
141+
<value>40</value>
142+
</property>
143+
<property>
144+
<name>dfs.namenode.replication.min</name>
145+
<value>3</value>
146+
</property>
147+
<property>
148+
<name>dfs.namenode.replication.work.multiplier.per.iteration</name>
149+
<value>10</value>
150+
</property>
151+
<property>
152+
<name>dfs.namenode.safemode.threshold-pct</name>
153+
<value>0.9</value>
154+
</property>
155+
<property>
156+
<name>dfs.namenode.service.handler.count</name>
157+
<value>64</value>
158+
</property>
159+
<property>
160+
<name>dfs.reformat.disabled</name>
161+
<value>false</value>
162+
</property>
163+
<property>
164+
<name>dfs.replication</name>
165+
<value>3</value>
166+
</property>
167+
<property>
168+
<name>dfs.replication.max</name>
169+
<value>512</value>
170+
</property>
171+
<property>
172+
<name>ipc.8020.callqueue.impl</name>
173+
<value>org.apache.hadoop.ipc.FairCallQueue</value>
174+
</property>
175+
<property>
176+
<name>ipc.8020.scheduler.impl</name>
177+
<value>org.apache.hadoop.ipc.DecayRpcScheduler</value>
178+
</property>
179+
<property>
180+
<name>zk-dt-secret-manager.zkAuthType</name>
181+
<value>digest</value>
182+
</property>
183+
<property>
184+
<name>zk-dt-secret-manager.digest.auth</name>
185+
<value>@/etc/hadoop/zookeeper/auth/zk-auth.txt</value>
186+
</property>
187+
<property>
188+
<name>zk-dt-secret-manager.zkConnectionString</name>
189+
<value>TODO</value>
190+
</property>
191+
<property>
192+
<name>zk-dt-secret-manager.znodeWorkingPath</name>
193+
<value>TODO</value>
194+
</property>
195+
<property>
196+
<name>dfs.client.failover.proxy.provider.hadoop</name>
197+
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
198+
</property>
199+
200+
<property>
201+
<name>dfs.client.https.keystore.resource</name>
202+
<value>ssl-client.xml</value>
203+
</property>
204+
<property>
205+
<name>dfs.client.https.need-auth</name>
206+
<value>false</value>
207+
</property>
208+
<property>
209+
<name>dfs.http.policy</name>
210+
<value>${env.HTTP_POLICY}</value>
211+
</property>
212+
<property>
213+
<name>dfs.https.enable</name>
214+
<value>${env.DFS_HTTPS_ENABLE}</value>
215+
</property>
216+
<property>
217+
<name>dfs.https.server.keystore.resource</name>
218+
<value>ssl-server.xml</value>
219+
</property>
220+
<property>
221+
<name>dfs.namenode.acls.enabled</name>
222+
<value>true</value>
223+
</property>
224+
<property>
225+
<!--From https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsMultihoming.html -->
226+
<name>dfs.datanode.use.datanode.hostname</name>
227+
<value>true</value>
228+
</property>
229+
<property>
230+
<!--https://log.rowanto.com/posts/why-datanode-is-denied-communication-with-namenode/-->
231+
<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
232+
<value>false</value>
233+
</property>
234+
<property>
235+
<name>dfs.namenode.shared.edits.dir</name>
236+
<value>${env.QJOURNAL}</value>
237+
</property>
238+
<property>
239+
<name>dfs.nameservices</name>
240+
<value>${env.HADOOP_SERVICE}</value>
241+
</property>
242+
<property>
243+
<name>dfs.ha.namenodes.hadoop</name>
244+
<value>namenode-0,namenode-1,namenode-2</value>
245+
</property>
246+
<property>
247+
<name>dfs.namenode.http-address.hadoop.namenode-0</name>
248+
<value>namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:9870</value>
249+
</property>
250+
<property>
251+
<name>dfs.namenode.https-address.hadoop.namenode-0</name>
252+
<value>namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:9871</value>
253+
</property>
254+
<property>
255+
<name>dfs.namenode.rpc-address.hadoop.namenode-0</name>
256+
<value>namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8020</value>
257+
<description>RPC address that handles all client requests. In the case of
258+
HA/Federation where multiple namenodes exist, the name service id is added
259+
to the name e.g. dfs.namenode.rpc-address.ns1 dfs.namenode.rpc-address.EXAMPLENAMESERVICE
260+
The value of this property will take the form of nn-host1:rpc-port.
261+
262+
The NameNode uses this value to determine its local name. The value must be
263+
usable for constructing a resolvable InetSocketAddress, and must then pass the is-local test.
264+
</description>
265+
</property>
266+
<property>
267+
<name>dfs.namenode.servicerpc-address.hadoop.namenode-0</name>
268+
<!--Service name-->
269+
<value>namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8022</value>
270+
</property>
271+
<property>
272+
<name>dfs.namenode.lifeline.rpc-address.hadoop.namenode-0</name>
273+
<value>namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8050</value>
274+
</property>
275+
<property>
276+
<name>dfs.namenode.http-address.hadoop.namenode-1</name>
277+
<value>namenode-1.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:9870</value>
278+
</property>
279+
<property>
280+
<name>dfs.namenode.https-address.hadoop.namenode-1</name>
281+
<value>namenode-1.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:9871</value>
282+
</property>
283+
<property>
284+
<name>dfs.namenode.rpc-address.hadoop.namenode-1</name>
285+
<value>namenode-1.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8020</value>
286+
</property>
287+
<property>
288+
<name>dfs.namenode.servicerpc-address.hadoop.namenode-1</name>
289+
<!--Service name-->
290+
<value>namenode-1.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8022</value>
291+
</property>
292+
<property>
293+
<name>dfs.namenode.lifeline.rpc-address.hadoop.namenode-1</name>
294+
<value>namenode-1.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8050</value>
295+
</property>
296+
<property>
297+
<name>dfs.namenode.http-address.hadoop.namenode-2</name>
298+
<value>namenode-2.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:9870</value>
299+
</property>
300+
<property>
301+
<name>dfs.namenode.https-address.hadoop.namenode-2</name>
302+
<value>namenode-2.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:9871</value>
303+
</property>
304+
<property>
305+
<name>dfs.namenode.rpc-address.hadoop.namenode-2</name>
306+
<value>namenode-2.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8020</value>
307+
</property>
308+
<property>
309+
<name>dfs.namenode.servicerpc-address.hadoop.namenode-2</name>
310+
<!--Service name-->
311+
<value>namenode-2.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8022</value>
312+
</property>
313+
<property>
314+
<name>dfs.namenode.lifeline.rpc-address.hadoop.namenode-2</name>
315+
<value>namenode-2.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8050</value>
316+
</property>
317+
<property>
318+
<name>dfs.blockreport.intervalMsec</name>
319+
<value>900000</value>
320+
<description>Determines block reporting interval in milliseconds.
321+
Report frequently else around recovery storms, the NN gets convinced
322+
there is no block space left because of 'scheduled space' reserved.
323+
</description>
324+
</property>
325+
</configuration>

0 commit comments

Comments
 (0)