Hadoop
Hadoop is a Java-based programming framework that supports the processing and storage of extremely large datasets on a cluster of inexpensive machines. It was the first major open source project in the big data field and is sponsored by the Apache Software Foundation. Hadoop is composed of four main layers:

- Hadoop Common is the collection of utilities and libraries that support the other Hadoop modules.
- HDFS, the Hadoop Distributed File System, is responsible for persisting data to disk.
- YARN, short for Yet Another Resource Negotiator, is the "operating system" of the cluster: it schedules jobs and allocates resources to them.
- MapReduce is the original processing model for Hadoop clusters. It maps work out across the cluster, then organizes and reduces the results from the nodes into an answer to a query; a minimal job submission is sketched below. Many other processing models are available for the 3.x version of Hadoop.
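As a quick illustration of that map-then-reduce flow, the word-count job bundled with the Hadoop distribution can be submitted to a running cluster (configured in the next section); the /input and /output HDFS paths here are illustrative choices, not fixed names.

hdfs dfs -mkdir -p /input
hdfs dfs -put $HADOOP_HOME/etc/hadoop/*.xml /input        # stage some text to count
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
  wordcount /input /output                                # map, shuffle, reduce
hdfs dfs -cat /output/part-r-00000                        # read the reduced result

The examples jar lives under share/hadoop/mapreduce in a standard tarball install; the glob avoids hard-coding its version.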
Configuration
mkdir -p /home/hadoop/hdfs/{datanode,namenode}
# Plain tee (not tee -a): the stock core-site.xml already holds an empty
# <configuration/> element, and appending a second one would break the XML.
sudo tee $HADOOP_HOME/etc/hadoop/core-site.xml >/dev/null <<EOF
<configuration>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/hadoop/tmp</value>
  </property>
  <property>
    <!-- fs.defaultFS supersedes the deprecated fs.default.name -->
    <name>fs.defaultFS</name>
    <value>hdfs://0.0.0.0:9000</value>
    <description>The default file system URI</description>
  </property>
</configuration>
EOF
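To confirm the edit parsed and took effect, the value can be read back through the client (hdfs getconf queries the effective configuration):

hdfs getconf -confKey fs.defaultFS   # should print hdfs://0.0.0.0:9000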
mkdir -p /home/hadoop/hdfs/{datanode,namenode}
sudo tee $HADOOP_HOME/etc/hadoop/hdfs-site.xml >/dev/null <<EOF
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/home/hadoop/hdfs/namenode/</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/home/hadoop/hdfs/datanode/</value>
  </property>
</configuration>
EOF
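The NameNode and DataNode directories (and hadoop.tmp.dir above) must exist and be writable by the account that runs the daemons; assuming that account is hadoop, as in the systemd snippet further down:

mkdir -p /home/hadoop/tmp
sudo chown -R hadoop:hadoop /home/hadoop/hdfs /home/hadoop/tmp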
sudo tee $HADOOP_HOME/etc/hadoop/mapred-site.xml >/dev/null <<EOF
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
EOF
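On Hadoop 3.x, jobs submitted to YARN often fail with "Could not find or load main class org.apache.hadoop.mapreduce.v2.app.MRAppMaster" unless the MapReduce classpath is also declared in mapred-site.xml; the property below is the commonly used remedy (the value assumes a standard tarball layout):

<property>
  <name>mapreduce.application.classpath</name>
  <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
</property>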
sudo tee $HADOOP_HOME/etc/hadoop/yarn-site.xml >/dev/null <<EOF
<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
</configuration>
EOF
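Once the daemons are up (the start-dfs.sh / start-yarn.sh invocations are listed under Knowledge below), registration can be checked from the command line:

hdfs dfsadmin -report   # expect one live datanode
yarn node -list         # expect one RUNNING NodeManager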
Unit Testing
lxc launch images:debian/12 agronomy &&
lxc exec agronomy bash <<'EOF'
sleep 5
apt-get update
# Debian 12 (bookworm) no longer packages OpenJDK 11; 17 is its default JDK.
apt-get install -y curl wget openjdk-17-jre \
  openssh-client openssh-server
systemctl daemon-reload
systemctl restart ssh   # the Debian unit is ssh.service, not sshd
systemctl status ssh
java -version
EOF
----
lxc launch images:fedora/37 robotics &&
lxc exec robotics bash <<'EOF'
sleep 5
# Fedora's client package is openssh-clients (plural).
dnf install -y curl wget java-11-openjdk \
  openssh-clients openssh-server
systemctl daemon-reload
systemctl restart sshd
systemctl status sshd
java -version
EOF
----
lxc launch images:ubuntu/22.04 software &&
lxc exec software bash <<'EOF'
sleep 5
apt-get update
apt-get install -y curl wget openjdk-11-jre \
  openssh-client openssh-server
systemctl daemon-reload
systemctl restart ssh   # Ubuntu also names the unit ssh.service
systemctl status ssh
java -version
EOF
----
create alias:
lxc stop agronomy
lxc publish agronomy --alias debian/12:java:ssh

create alias from snapshot:
lxc snapshot agronomy java:ssh
lxc publish agronomy/java:ssh --alias debian/12:java:ssh
lxc delete agronomy

launch alias:
lxc launch debian/12:java:ssh agronomy && lxc exec agronomy bash
lxc stop agronomy && lxc delete agronomy
----
create alias:
lxc stop robotics
lxc publish robotics --alias fedora/37:java:ssh

create alias from snapshot:
lxc snapshot robotics java:ssh
lxc publish robotics/java:ssh --alias fedora/37:java:ssh
lxc delete robotics

launch alias:
lxc launch fedora/37:java:ssh robotics && lxc exec robotics bash
lxc stop robotics && lxc delete robotics
----
create alias:
lxc stop software
lxc publish software --alias ubuntu/22.04:java:ssh

create alias from snapshot:
lxc snapshot software java:ssh
lxc publish software/java:ssh --alias ubuntu/22.04:java:ssh
lxc delete software

launch alias:
lxc launch ubuntu/22.04:java:ssh software && lxc exec software bash
lxc stop software && lxc delete software
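A minimal sketch that performs the stop-and-publish step for all three containers in one pass; the associative-array mapping simply mirrors the names used above:

declare -A TEST_IMAGES=([agronomy]='debian/12' [robotics]='fedora/37' [software]='ubuntu/22.04')
for CONTAINER in "${!TEST_IMAGES[@]}"; do
  lxc stop "${CONTAINER}"
  lxc publish "${CONTAINER}" --alias "${TEST_IMAGES[${CONTAINER}]}:java:ssh"
done
lxc image list   # the three java:ssh aliases should now be listed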
Knowledge
# -N "" gives an empty passphrase; the identifying string belongs in the comment (-C).
ssh-keygen -b 4096 -t rsa -f ~/.ssh/id_rsa -q -N "" -C "hadoop@${HOSTNAME}"
readlink -f /usr/bin/java | sed "s:bin/java::"   # derive JAVA_HOME from the java binary
sudo apt-get install pdsh
sudo apt-get install ssh
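The Hadoop start-up scripts log in to localhost over SSH, so the key must be passphrase-less (hence -N "" above) and authorized:

cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
ssh localhost true   # should return without prompting for a password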
sudo -u hadoop -H hdfs namenode -format          # format HDFS as the hadoop user
sudo -u hadoop -H sh -c 'whoami; echo ${HOME}'   # sanity-check the effective user and home
sh $HADOOP_HOME/sbin/start-dfs.sh                # NameNode web UI: http://127.0.0.1:9870
sh $HADOOP_HOME/sbin/start-yarn.sh               # ResourceManager web UI: http://127.0.0.1:8088
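jps, which ships with the JDK, gives a quick view of which daemons actually came up:

jps   # expect NameNode, DataNode, SecondaryNameNode, ResourceManager and NodeManager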
sudo apt dist-upgrade
sudo do-release-upgrade
sudo apt --fix-broken install
sudo apt install ubuntu-desktop
[Service]
User=hadoop
Group=hadoop
Type=forking
SuccessExitStatus=143
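These directives sit inside a complete unit; a minimal sketch of one (the hadoop-dfs name, the /opt/hadoop prefix and the JAVA_HOME path are assumptions for illustration, not part of a stock install) might look like:

# /etc/systemd/system/hadoop-dfs.service (hypothetical name and path)
[Unit]
Description=Hadoop HDFS daemons
After=network.target

[Service]
User=hadoop
Group=hadoop
Type=forking
# Adjust JAVA_HOME to the installed JDK.
Environment=JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64
ExecStart=/opt/hadoop/sbin/start-dfs.sh
ExecStop=/opt/hadoop/sbin/stop-dfs.sh
# 143 = 128+SIGTERM, the JVM's normal exit code on a clean stop.
SuccessExitStatus=143

[Install]
WantedBy=multi-user.target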
if [ -f '/etc/os-release' ]; then
  HOST_OS_ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
  HOST_OS_ID_LIKE=$(grep -oP '(?<=^ID_LIKE=).+' /etc/os-release | tr -d '"')
  HOST_OS_VERSION=$(grep -oP '(?<=^VERSION_ID=).+' /etc/os-release | tr -d '"')
fi
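The detected values can then drive distribution-specific branching, e.g. selecting the package manager used by the Unit Testing recipes above (a sketch; the variables are the ones set just before):

case "${HOST_OS_ID}" in
  debian|ubuntu) PKG_MANAGER='apt-get' ;;
  fedora)        PKG_MANAGER='dnf' ;;
  *)             echo "unhandled distribution: ${HOST_OS_ID}" >&2 ;;
esac
echo "using ${PKG_MANAGER} on ${HOST_OS_ID} ${HOST_OS_VERSION}"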
declare -a HADOOP_SCHEDULERS=(dev prod)
declare -A MINMAX_CAPACITIES=([dev]='50 50' [prod]='50 70')
for HADOOP_SCHEDULER in "${HADOOP_SCHEDULERS[@]}"; do
  # Left unquoted on purpose: word splitting yields the min and max values in turn.
  for MINMAX_CAPACITY in ${MINMAX_CAPACITIES[${HADOOP_SCHEDULER}]}; do
    echo "${HADOOP_SCHEDULER} => ${MINMAX_CAPACITY}"
  done
done
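Queue names and percentage pairs like these map onto CapacityScheduler keys in capacity-scheduler.xml; a sketch of emitting the corresponding properties, reusing the arrays above (root.<queue> is the standard queue-path convention):

for HADOOP_SCHEDULER in "${HADOOP_SCHEDULERS[@]}"; do
  read -r MIN_CAPACITY MAX_CAPACITY <<< "${MINMAX_CAPACITIES[${HADOOP_SCHEDULER}]}"
  echo "yarn.scheduler.capacity.root.${HADOOP_SCHEDULER}.capacity=${MIN_CAPACITY}"
  echo "yarn.scheduler.capacity.root.${HADOOP_SCHEDULER}.maximum-capacity=${MAX_CAPACITY}"
done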