From 817c18c6a16491bc500c61eb0b50bebf29c45a7f Mon Sep 17 00:00:00 2001
From: Decoupes Remy <remy.decoupes@irstea.fr>
Date: Fri, 29 Nov 2019 17:09:33 +0100
Subject: [PATCH] Prepare /etc/hosts for all nodes using the private network and
 static IPs

---
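Notes (not applied by this patch):

The subject mentions preparing /etc/hosts for all nodes, but the task that writes
/etc/hosts is not included in this diff. A minimal sketch of one way to generate it
from the static inventory, assuming it lives in the common role and that every
inventory host defines ansible_host, could be:

- name: populate /etc/hosts with all cluster nodes (hypothetical task, not in this patch)
  become: yes
  blockinfile:
    path: /etc/hosts
    marker: "# {mark} ANSIBLE MANAGED CLUSTER HOSTS"
    block: |
      {% for host in groups['all'] %}
      {{ hostvars[host]['ansible_host'] }} {{ host }}
      {% endfor %}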
 inventory/static                     |  10 +-
 playbook/install-namenode.yml        |   3 +-
 playbook/roles/hadoop/tasks/main.yml | 325 ++++++++++++++-------------
 vagrant/cluster/Vagrantfile          |  33 +--
 4 files changed, 190 insertions(+), 181 deletions(-)
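
The debug message added in playbook/roles/hadoop/tasks/main.yml prints
ansible_default_ipv4.address, which on these Vagrant boxes usually belongs to the
interface carrying the default route (the bridged public_network), not to the new
10.0.0.x private network. An illustrative check of both values, assuming the
standard ansible_all_ipv4_addresses fact and the ansible_host set in the inventory:

- name: show all addresses and the inventory-declared one (illustrative only)
  debug:
    msg:
      - "All IPv4 addresses: {{ ansible_all_ipv4_addresses }}"
      - "Static address from inventory: {{ hostvars[inventory_hostname]['ansible_host'] }}"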

diff --git a/inventory/static b/inventory/static
index d78e73b..140415f 100644
--- a/inventory/static
+++ b/inventory/static
@@ -1,8 +1,8 @@
-[master-nodes]
-
-[slave-nodes]
-
-[edge-nodes]
+[namenode]
+namenode ansible_host=10.0.0.10
+[datanodes]
+datanode1 ansible_host=10.0.0.11
+datanode2 ansible_host=10.0.0.12
 
 [mono-node]
 mononode-hdfs ansible_host=172.16.50.54
\ No newline at end of file
diff --git a/playbook/install-namenode.yml b/playbook/install-namenode.yml
index 4db0190..8956c4e 100644
--- a/playbook/install-namenode.yml
+++ b/playbook/install-namenode.yml
@@ -3,4 +3,5 @@
   hosts: 'namenode'
   
   roles:
-    - common
\ No newline at end of file
+    - common
+    - hadoop
\ No newline at end of file
diff --git a/playbook/roles/hadoop/tasks/main.yml b/playbook/roles/hadoop/tasks/main.yml
index 2215385..5fb7ebd 100644
--- a/playbook/roles/hadoop/tasks/main.yml
+++ b/playbook/roles/hadoop/tasks/main.yml
@@ -4,167 +4,168 @@
     msg: 
       - "Hadoop only support java jdk 8, see https://cwiki.apache.org/confluence/display/HADOOP/Hadoop+Java+Versions when it'll support jdk 11"
       - "Be aware that things may not work when using jdk 11 like explore HDFS using webserver on port 9870"
-
-- name: Set java home as environment variable
-  become: yes
-  apt:
-    name:
-      - openjdk-11-jdk
-
-- name: create hadoop group
-  become: yes
-  group:
-    name: hadoop
-
-- name: create hadoop user
-  become: yes
-  user:
-    name: hadoop
-    group: hadoop
-    home: "{{ hadoopUSRHome }}"
-    createhome: yes
-    system: yes
-
-- name: Set JAVA_HOME as environment variable
-  become: yes
-  become_user : hadoop
-  blockinfile:
-    insertafter: EOF
-    path : ~/.bashrc
-    block: |
-      export JAVA_HOME={{ javahome }}
-      export HADOOP_HOME=/usr/local/hadoop
-      export PATH=$PATH:$HADOOP_HOME/bin
-
-- name: source .bashrc
-  become: yes
-  become_user: hadoop
-  shell: source ~/.bashrc
-  args:
-     executable: /bin/bash
-
-
-- name: create .ssh directory
-  become: yes
-  file:
-    path: "{{ hadoopUSRHome }}/.ssh/"
-    state: directory
-    owner: hadoop
-    group: hadoop
-    mode: 0700
-
-- name: copy ssh key
-  become: yes
-  copy:
-    src: "{{ item }}"
-    dest: "{{ hadoopUSRHome }}/.ssh/"
-    owner: hadoop
-    group: hadoop
-    mode: 0600
-  with_items:
-    - keys/id_rsa
-    - keys/id_rsa.pub
-
-- name: authorized ssh key for hadoop user
-  become: yes
-  authorized_key:
-    user: hadoop
-    state: present
-    key: "{{ lookup('file', 'keys/id_rsa.pub') }}"
+      - "Default IPv4 address is : {{ ansible_default_ipv4.address }}"
+
+# - name: Set java home as environment variable
+#   become: yes
+#   apt:
+#     name:
+#       - openjdk-11-jdk
+
+# - name: create hadoop group
+#   become: yes
+#   group:
+#     name: hadoop
+
+# - name: create hadoop user
+#   become: yes
+#   user:
+#     name: hadoop
+#     group: hadoop
+#     home: "{{ hadoopUSRHome }}"
+#     createhome: yes
+#     system: yes
+
+# - name: Set JAVA_HOME as environment variable
+#   become: yes
+#   become_user : hadoop
+#   blockinfile:
+#     insertafter: EOF
+#     path : ~/.bashrc
+#     block: |
+#       export JAVA_HOME={{ javahome }}
+#       export HADOOP_HOME=/usr/local/hadoop
+#       export PATH=$PATH:$HADOOP_HOME/bin
+
+# - name: source .bashrc
+#   become: yes
+#   become_user: hadoop
+#   shell: source ~/.bashrc
+#   args:
+#      executable: /bin/bash
+
+
+# - name: create .ssh directory
+#   become: yes
+#   file:
+#     path: "{{ hadoopUSRHome }}/.ssh/"
+#     state: directory
+#     owner: hadoop
+#     group: hadoop
+#     mode: 0700
+
+# - name: copy ssh key
+#   become: yes
+#   copy:
+#     src: "{{ item }}"
+#     dest: "{{ hadoopUSRHome }}/.ssh/"
+#     owner: hadoop
+#     group: hadoop
+#     mode: 0600
+#   with_items:
+#     - keys/id_rsa
+#     - keys/id_rsa.pub
+
+# - name: authorized ssh key for hadoop user
+#   become: yes
+#   authorized_key:
+#     user: hadoop
+#     state: present
+#     key: "{{ lookup('file', 'keys/id_rsa.pub') }}"
     
-- name: create a tempory directory
-  become: yes
-  file:
-    state: directory
-    path: "{{ hadoopUSRHome }}/tmp"
-
-- name: create a prod directory for hadoop
-  become: yes
-  file:
-    state: directory
-    path: "{{ hadoopDir }}"
-
-- name: "Download and Extract hadoop-{{ hadoopVersion }}"
-  become: yes
-  unarchive: 
-    src: "http://apache.mirrors.ovh.net/ftp.apache.org/dist/hadoop/core/hadoop-{{ hadoopVersion }}/hadoop-{{ hadoopVersion }}.tar.gz"
-    remote_src: yes
-    dest: "{{ hadoopDir }}"
-    extra_opts: [--strip-components=1]
-    owner: hadoop
-    group: hadoop
-
-- name : Set JAVA_HOME in hadoop-env.sh
-  become: yes
-  blockinfile:
-    insertafter: EOF
-    path: "{{ hadoopDir }}/etc/hadoop/hadoop-env.sh"
-    block: "export JAVA_HOME={{ javahome }}"
-
-- name: configure core-site.xml
-  become: yes
-  template:
-    src: templates/core-site.j2
-    dest: "{{ hadoopDir }}/etc/hadoop/core-site.xml"
-    owner: hadoop
-    group: hadoop
-
-- name: configure hdfs-site.xml
-  become: yes
-  template:
-    src: templates/hdfs-site.j2
-    dest: "{{ hadoopDir }}/etc/hadoop/hdfs-site.xml"
-    owner: hadoop
-    group: hadoop
-
-- name: configure mapred-site.xml
-  become: yes
-  template:
-    src: templates/mapred-site.j2
-    dest: "{{ hadoopDir }}/etc/hadoop/mapred-site.xml"
-    owner: hadoop
-    group: hadoop
-
-- name: copy hadoop service file
-  become: yes
-  template:
-    src: templates/hadoop.service.j2
-    dest: /etc/systemd/system/hadoop.service
-
-- name: enable hadoop service
-  become: yes
-  service:
-    daemon_reload: yes
-    name: hadoop
-    state: stopped
-    enabled: yes
-
-- name: HDFS has been already formatted ?
-  become: yes
-  stat:
-    path: /tmp/hadoop-hadoop/dfs/name/current/VERSION
-  register: file_exist
-
-- debug:
-    msg: "/tmp/hadoop-hadoop/dfs/name/current/VERSION  exists ? : {{ file_exist.stat.exists}}"
-
-- name: format HDFS
-  become: yes
-  become_user: hadoop
-  shell: "{{ hadoopDir }}/bin/hdfs namenode -format"
-  args:
-    executable: /bin/bash
-  when: file_exist.stat.exists == False
-
-- name: stopped hadoop service
-  become: yes
-  service:
-    name: hadoop
-    state: stopped
-
-- name: start hadoop service
-  become: yes
-  service:
-    name: hadoop
-    state: started
+# - name: create a temporary directory
+#   become: yes
+#   file:
+#     state: directory
+#     path: "{{ hadoopUSRHome }}/tmp"
+
+# - name: create a prod directory for hadoop
+#   become: yes
+#   file:
+#     state: directory
+#     path: "{{ hadoopDir }}"
+
+# - name: "Download and Extract hadoop-{{ hadoopVersion }}"
+#   become: yes
+#   unarchive: 
+#     src: "http://apache.mirrors.ovh.net/ftp.apache.org/dist/hadoop/core/hadoop-{{ hadoopVersion }}/hadoop-{{ hadoopVersion }}.tar.gz"
+#     remote_src: yes
+#     dest: "{{ hadoopDir }}"
+#     extra_opts: [--strip-components=1]
+#     owner: hadoop
+#     group: hadoop
+
+# - name : Set JAVA_HOME in hadoop-env.sh
+#   become: yes
+#   blockinfile:
+#     insertafter: EOF
+#     path: "{{ hadoopDir }}/etc/hadoop/hadoop-env.sh"
+#     block: "export JAVA_HOME={{ javahome }}"
+
+# - name: configure core-site.xml
+#   become: yes
+#   template:
+#     src: templates/core-site.j2
+#     dest: "{{ hadoopDir }}/etc/hadoop/core-site.xml"
+#     owner: hadoop
+#     group: hadoop
+
+# - name: configure hdfs-site.xml
+#   become: yes
+#   template:
+#     src: templates/hdfs-site.j2
+#     dest: "{{ hadoopDir }}/etc/hadoop/hdfs-site.xml"
+#     owner: hadoop
+#     group: hadoop
+
+# - name: configure mapred-site.xml
+#   become: yes
+#   template:
+#     src: templates/mapred-site.j2
+#     dest: "{{ hadoopDir }}/etc/hadoop/mapred-site.xml"
+#     owner: hadoop
+#     group: hadoop
+
+# - name: copy hadoop service file
+#   become: yes
+#   template:
+#     src: templates/hadoop.service.j2
+#     dest: /etc/systemd/system/hadoop.service
+
+# - name: enable hadoop service
+#   become: yes
+#   service:
+#     daemon_reload: yes
+#     name: hadoop
+#     state: stopped
+#     enabled: yes
+
+# - name: has HDFS already been formatted?
+#   become: yes
+#   stat:
+#     path: /tmp/hadoop-hadoop/dfs/name/current/VERSION
+#   register: file_exist
+
+# - debug:
+#     msg: "/tmp/hadoop-hadoop/dfs/name/current/VERSION  exists ? : {{ file_exist.stat.exists}}"
+
+# - name: format HDFS
+#   become: yes
+#   become_user: hadoop
+#   shell: "{{ hadoopDir }}/bin/hdfs namenode -format"
+#   args:
+#     executable: /bin/bash
+#   when: file_exist.stat.exists == False
+
+# - name: stopped hadoop service
+#   become: yes
+#   service:
+#     name: hadoop
+#     state: stopped
+
+# - name: start hadoop service
+#   become: yes
+#   service:
+#     name: hadoop
+#     state: started
 
diff --git a/vagrant/cluster/Vagrantfile b/vagrant/cluster/Vagrantfile
index 9a98526..5b14834 100644
--- a/vagrant/cluster/Vagrantfile
+++ b/vagrant/cluster/Vagrantfile
@@ -1,15 +1,15 @@
 Vagrant.configure("2") do |config|
-	config.vm.box = "generic/debian10"
-	config.vm.network "public_network", bridge:"enp1s0", use_dhcp_assigned_default_route: true
-	config.vm.provision "shell", inline: <<-SHELL
-		systemctl stop resolvconf
-		echo "nameserver 10.34.192.61" > /etc/resolv.conf
-		echo "nameserver 10.34.192.62" >> /etc/resolv.conf
-	SHELL
-
 	config.vm.define "namenode" do |namenode|
-		config.vm.hostname = "namenode"
-		config.vm.provision "ansible" do |ansible|
+		namenode.vm.box = "generic/debian10"
+		namenode.vm.network "public_network", bridge:"enp1s0", use_dhcp_assigned_default_route: true
+		namenode.vm.provision "shell", inline: <<-SHELL
+			systemctl stop resolvconf
+			echo "nameserver 10.34.192.61" > /etc/resolv.conf
+			echo "nameserver 10.34.192.62" >> /etc/resolv.conf
+		SHELL
+		namenode.vm.hostname = "namenode"
+		namenode.vm.network :private_network, ip: "10.0.0.10"
+		namenode.vm.provision "ansible" do |ansible|
 			ansible.playbook = "/home/rdecoupe/Documents/TETIS/projet/aidmoit/ansible-deployment/playbook/install-namenode.yml"
 		end
 	end
@@ -18,10 +18,17 @@ Vagrant.configure("2") do |config|
 	N = 2
 	(1..2).each do |machine_id|
 		config.vm.define "datanode#{machine_id}" do |machine|
-			config.vm.hostname = "datanode#{machine_id}"
+			machine.vm.box = "generic/debian10"
+			machine.vm.network "public_network", bridge:"enp1s0", use_dhcp_assigned_default_route: true
+			machine.vm.provision "shell", inline: <<-SHELL
+				systemctl stop resolvconf
+				echo "nameserver 10.34.192.61" > /etc/resolv.conf
+				echo "nameserver 10.34.192.62" >> /etc/resolv.conf
+			SHELL
+			machine.vm.hostname = "datanode#{machine_id}"
+			machine.vm.network :private_network, ip: "10.0.0.1#{machine_id}"
 			if machine_id == N
-				config.vm.provision "ansible" do |ansible|
-					# ansible.playbook = "/home/rdecoupe/Documents/TETIS/projet/aidmoit/ansible-deployment/playbook/install-datanode.yml"
+				machine.vm.provision "ansible" do |ansible|
 					ansible.playbook = "../../playbook/install-datanode.yml"
 				end
 			end
-- 
GitLab