Wednesday, May 21, 2014

Storage migration on Solaris server

The migration steps.
Plan:
I believe LUNs need to be allocated from storage on the new leg (anywhere except the HE1 VMAX / 3809) so they can be presented to samserv02. The data must then be mirrored to the new-leg LUNs. Once the mirroring has completed, the following steps should be performed:
Unix Support team tasks (a command-level sketch follows the list):
1. Create/request new sets of LUNs visible at both ends, on both legs.
[Rezone the LUNs from samserv02 to samserv110Z02]
2. Mirror the existing VxVM volumes from the old leg onto the new-leg LUNs.
3. Break the mirror on the old leg (remove the plexes on the old LUNs).
4. Bring the server (old leg) down to single-user mode.
5. Deport the VxVM disk group.
6. Import the disk group on the new leg and mount all the volumes.
7. Reboot the server to make sure everything comes up clean.
8. Push DNS updates to point the alias samserv02.expanor.local at samserv110Z02.expanor.local.
9. Idle samserv02 for 1 week.
10. Issue a retirement request to retire it as samserv02-OLD.
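
A hedged command-level sketch of steps 2-6 above, assuming disk group samserv02dg, a single volume FS_opt_oracle mounted at /opt/oracle, and new-leg disks EMCDSK01/EMCDSK02 (names illustrative; repeat per volume):

# vxassist -b -g samserv02dg mirror FS_opt_oracle EMCDSK01 EMCDSK02   # step 2: mirror onto new leg
# vxtask list                                                         # watch the sync complete
# vxplex -g samserv02dg -o rm dis FS_opt_oracle-01                    # step 3: remove the old-leg plex
# umount /opt/oracle                                                  # quiesce before deporting
# vxdg deport samserv02dg                                             # step 5
On samserv110Z02:
# vxdg import samserv02dg                                             # step 6
# vxvol -g samserv02dg startall
# mount -F vxfs /dev/vx/dsk/samserv02dg/FS_opt_oracle /opt/oracle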

Copy the following files to the new server:
/usr/bin/oraenv
/var/opt/oracle/oratab

DBA Task
Create the database schema on samserv110Z02
Bring the database(s) up on samserv110Z02
Validate the database(s) are operational and ready to use

Other tickets
A. Open a change ticket to consolidate the servers (CRQ000005095033).
1. Open a ticket with the eTrust Access Control team to copy the eTrust rules.
2. Inform the DBAs to work on Guardium.
3. Inform the folks who manage Tripwire.
4. Open a WO with the monitoring team to append the Spectrum rules from the old leg to the new server.
5. Inform the operations center (OC) to suppress alarms on the servers while transitioning for the migration.

============================================================


root@samserv02:/dev/vx/dsk/samserv02dg > df -h
/dev/vx/dsk/samserv02dg/FS_opt_dts
                       3.8G   2.8G   556M    84%    /opt/dts
/dev/vx/dsk/samserv02dg/FS_opt_sybase12.5
                       375M   236M   102M    70%    /opt/sybase12.5
/dev/vx/dsk/samserv02dg/FS_FSS_oracle
                        60G   5.5G    48G    11%    /FSS/oracle
/dev/vx/dsk/samserv02dg/FS_opt_sybase
                       750M   175M   500M    26%    /opt/sybase
/dev/vx/dsk/samserv02dg/FS_opt_sybase12.0
                       375M   235M   103M    70%    /opt/sybase12.0
/dev/vx/dsk/samserv02dg/FS_opt_oracle
                        79G    22G    49G    31%    /opt/oracle
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_log
                       5.6G   628M   4.4G    13%    /FSS/oracle/fsdata2/log
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_rollback
                        15G   6.6G   6.9G    50%    /FSS/oracle/fsdata2/rollback
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_system
                        15G   7.8G   5.5G    59%    /FSS/oracle/fsdata2/system
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_index
                        49G    33G    11G    76%    /FSS/oracle/fsdata2/index
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_archive
                        84G    64M    76G     1%    /FSS/oracle/fsdata2/archive
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_dump
                       372G   178G   157G    54%    /FSS/oracle/fsdata2/dump
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_data
                       536G   220G   263G    46%    /FSS/oracle/fsdata2/data
rpool                   96G    98K    32G     1%    /rpool
rpool/ROOT              96G    21K    32G     1%    /rpool/ROOT
homenas1:/vol/ppnas507av12/homeserv1/c13637
                       200G    82G   118G    42%    /home/c13637
root@samserv02:/dev/vx/dsk/samserv02dg >
---------------------------------------------------------------
root@samserv02:/dev/vx/dsk/samserv02dg > ls -l
total 0
brw-------   1 root     root     282, 123000 Nov  4  2012 FS_FSS_oracle
brw-------   1 root     root     282, 123034 Nov  4  2012 FS_FSS_oracle_fsdata2_archive
brw-------   1 root     root     282, 123030 Nov  4  2012 FS_FSS_oracle_fsdata2_data
brw-------   1 root     root     282, 123032 Nov  4  2012 FS_FSS_oracle_fsdata2_dump
brw-------   1 root     root     282, 123031 Nov  4  2012 FS_FSS_oracle_fsdata2_index
brw-------   1 root     root     282, 123038 Nov  4  2012 FS_FSS_oracle_fsdata2_log
brw-------   1 root     root     282, 123033 Nov  4  2012 FS_FSS_oracle_fsdata2_rollback
brw-------   1 root     root     282, 123039 Nov  4  2012 FS_FSS_oracle_fsdata2_system
brw-------   1 root     root     282, 123013 Nov  4  2012 FS_opt_best1
brw-------   1 root     root     282, 123011 Nov  4  2012 FS_opt_dts
brw-------   1 root     root     282, 123009 Nov  4  2012 FS_opt_oracle
brw-------   1 root     root     282, 123040 Nov  4  2012 FS_opt_patrol
brw-------   1 root     root     282, 123008 Nov  4  2012 FS_opt_quest
brw-------   1 root     root     282, 123012 Nov  4  2012 FS_opt_sybase
brw-------   1 root     root     282, 123049 Nov  4  2012 FS_opt_sybase12.0
brw-------   1 root     root     282, 123050 Nov  4  2012 FS_opt_sybase12.5
---------------------------------------------------------------
root@samserv02:/ > df -h | grep vx | grep -v swap |awk '{print $1 "\t\t\t" $6}'
/dev/vx/dsk/samserv02dg/FS_opt_dts                       /opt/dts
/dev/vx/dsk/samserv02dg/FS_opt_sybase12.5                       /opt/sybase12.5
/dev/vx/dsk/samserv02dg/FS_FSS_oracle                   /FSS/oracle
/dev/vx/dsk/samserv02dg/FS_opt_sybase                    /opt/sybase
/dev/vx/dsk/samserv02dg/FS_opt_sybase12.0                       /opt/sybase12.0
/dev/vx/dsk/samserv02dg/FS_opt_oracle                    /opt/oracle
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_log                /FSS/oracle/fsdata2/log
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_rollback           /FSS/oracle/fsdata2/rollback
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_system             /FSS/oracle/fsdata2/system
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_index              /FSS/oracle/fsdata2/index
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_archive            /FSS/oracle/fsdata2/archive
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_dump               /FSS/oracle/fsdata2/dump
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_data               /FSS/oracle/fsdata2/data
---------------------------------------------------------------
root@samserv02:/ > df -h | grep vx | grep -v swap |awk '{print $1}' | awk -F/ '{print $6}'
FS_opt_dts
FS_opt_sybase12.5
FS_FSS_oracle
FS_opt_sybase
FS_opt_sybase12.0
FS_opt_oracle
FS_FSS_oracle_fsdata2_log
FS_FSS_oracle_fsdata2_rollback
FS_FSS_oracle_fsdata2_system
FS_FSS_oracle_fsdata2_index
FS_FSS_oracle_fsdata2_archive
FS_FSS_oracle_fsdata2_dump
FS_FSS_oracle_fsdata2_data
---------------------------------------------------------------
root@samserv02:/ > vxprint -ht | grep "^v" | awk '{print $2}'
FS_FSS_oracle
FS_FSS_oracle_fsdata2_archive
FS_FSS_oracle_fsdata2_data
FS_FSS_oracle_fsdata2_dump
FS_FSS_oracle_fsdata2_index
FS_FSS_oracle_fsdata2_log
FS_FSS_oracle_fsdata2_rollback
FS_FSS_oracle_fsdata2_system
FS_opt_best1
FS_opt_dts
FS_opt_oracle
FS_opt_patrol
FS_opt_quest
FS_opt_sybase
FS_opt_sybase12.0
FS_opt_sybase12.5
root@samserv02:/ >
root@samserv02:/dev/vx/dsk/samserv02dg > ls -l | awk '{print $10}'
FS_FSS_oracle
FS_FSS_oracle_fsdata2_archive
FS_FSS_oracle_fsdata2_data
FS_FSS_oracle_fsdata2_dump
FS_FSS_oracle_fsdata2_index
FS_FSS_oracle_fsdata2_log
FS_FSS_oracle_fsdata2_rollback
FS_FSS_oracle_fsdata2_system
FS_opt_best1
FS_opt_dts
FS_opt_oracle
FS_opt_patrol
FS_opt_quest
FS_opt_sybase
FS_opt_sybase12.0
FS_opt_sybase12.5
---------------------------------------------------------------
root@samserv02:/dev/vx/dsk/samserv02dg > df -h | grep vx | awk '{print $1 "\t\t\t" $2 "\t\t\t" $6}' | grep -v swap
/dev/vx/dsk/samserv02dg/FS_opt_dts                      3.8G    /opt/dts                        Done - disk2
/dev/vx/dsk/samserv02dg/FS_opt_sybase12.5               375M    /opt/sybase12.5                 Done - disk1
/dev/vx/dsk/samserv02dg/FS_FSS_oracle                   60G     /FSS/oracle                     Done - disk1
/dev/vx/dsk/samserv02dg/FS_opt_sybase                   750M    /opt/sybase                     Done - disk1
/dev/vx/dsk/samserv02dg/FS_opt_sybase12.0               375M    /opt/sybase12.0                 Done - disk1
/dev/vx/dsk/samserv02dg/FS_opt_oracle                   79G     /opt/oracle                     Done - disk2
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_log       5.6G    /FSS/oracle/fsdata2/log         Done - disk1
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_rollback  15G     /FSS/oracle/fsdata2/rollback    Done - disk3
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_system    15G     /FSS/oracle/fsdata2/system      Done - disk3
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_index     49G     /FSS/oracle/fsdata2/index       Done - disk4
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_archive   84G     /FSS/oracle/fsdata2/archive     Done - disk3
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_dump      372G    /FSS/oracle/fsdata2/dump
/dev/vx/dsk/samserv02dg/FS_FSS_oracle_fsdata2_data      536G    /FSS/oracle/fsdata2/data        Done - disk5/6
---------------------------------------------------------------
root@samserv110 # cat samserv110z01.xml | grep vx | awk -F= '{print $2 "\t\t" $4}' | awk '{print $1 "\t" $3}' | sed 's/"//g' | awk '{print $1}'
/dev/vx/dsk/he1unx200dg/FS_export_home
/dev/vx/dsk/he1unx200dg/FS_export_home_oracle
/dev/vx/dsk/he1unx200dg/FS_opt_oracle
/dev/vx/dsk/he1unx200dg/FS_FSS_oracle
/dev/vx/dsk/he1unx200dg/FS_FSS_oracle_admin
/dev/vx/dsk/he1unx200dg/FS_FSS_oracle_cfprod_archive
/dev/vx/dsk/he1unx200dg/FS_FSS_oracle_cfprod_data1
/dev/vx/dsk/he1unx200dg/FS_FSS_oracle_cfprod_data2
/dev/vx/dsk/he1unx200dg/FS_FSS_oracle_cfprod_data3
/dev/vx/dsk/he1unx200dg/FS_FSS_oracle_cfprod_data4
/dev/vx/dsk/he1unx200dg/FS_FSS_oracle_cfprod_data5
/dev/vx/dsk/he1unx200dg/FS_FSS_oracle_cfprod_data6
/dev/vx/dsk/he1unx200dg/FS_opt_dts
/dev/vx/dsk/he1unx200dg/FS_FSS_oracle_cfprod_dump
/dev/vx/dsk/cfprdbdg/FS_fm_ora_cfprdb_data1
/dev/vx/dsk/cfprdbdg/FS_fm_ora_cfprdb_data2
/dev/vx/dsk/cfprdbdg/FS_fm_ora_cfprdb_data3
/dev/vx/dsk/cfprdbdg/FS_fm_ora_cfprdb_data4
/dev/vx/dsk/cfprdbdg/FS_fm_ora_cfprdb_data5
/dev/vx/dsk/cfprdbdg/FS_fm_ora_cfprdb_data6
/dev/vx/dsk/cfprdbdg/FS_FSS_oracle_cfprdb_data7
/dev/vx/dsk/cfprdb_dumpdg/FS_fm_ora_cfprdb_dump
/dev/vx/dsk/cfprdbdg/FS_fm_ora_cfprdb_archive
/dev/vx/dsk/he1unx200dg/FS_opt_quest_foglight5
root@samserv110 #

root@samserv02:/dev/vx/dsk/samserv02dg > vxassist -b -g samserv02dg mirror FS_opt_sybase  EMCDSK01
root@samserv02:/dev/vx/dsk/samserv02dg > vxtask list
TASKID  PTID TYPE/STATE    PCT   PROGRESS
   175           ATCOPY/R 07.62% 0/1638400/124928 PLXATT FS_opt_sybase FS_opt_sybase-02 samserv02dg

=======================================================================

root@samserv02:/ > vxprint -ht | grep "^v" | awk '{print $2}'
FS_FSS_oracle
FS_FSS_oracle_fsdata2_archive
FS_FSS_oracle_fsdata2_data
FS_FSS_oracle_fsdata2_dump
FS_FSS_oracle_fsdata2_index
FS_FSS_oracle_fsdata2_log
FS_FSS_oracle_fsdata2_rollback
FS_FSS_oracle_fsdata2_system
FS_opt_best1
FS_opt_dts
FS_opt_oracle
FS_opt_patrol
FS_opt_quest
FS_opt_sybase
FS_opt_sybase12.0
FS_opt_sybase12.5

==============================


Create a volume with a plex called plex2
# vxmake -g baddg vol vol2 plex=plex2

Start a volume
Next, we must start the new volume
# vxvol -g baddg start vol2

Then use vxplex ‘att’ to attach the plex to the original volume. The plex will be synchronised with the plex(es)
already attached to the volume.
# vxplex -g diskgroup att plex_name
==============================

mirror a volume

root@samserv02:/ > vxassist -b -g samserv02dg mirror FS_fss_oracle_hprd8_index disk1 disk2 disk3
root@samserv02:/ > vxprint -ht | grep FS_fss_oracle_hprd8_index
root@samserv02:/ > vxtask list
TASKID  PTID TYPE/STATE    PCT   PROGRESS
   275           ATCOPY/R 04.53% 0/109051904/4939776 PLXATT FS_fss_oracle_hprd8_index FS_fss_oracle_hprd8_index samserv02dg

root@samserv02:/ > vxassist -b -g samserv02dg mirror FS_fss_oracle_hprd8_archive
root@samserv02:/ > vxprint -ht | grep FS_fss_oracle_hprd8_archive
v  FS_fss_oracle_hprd8_archive - ENABLED ACTIVE 188743680 SELECT -        fsgen
pl FS_fss_oracle_hprd8_archive-02 FS_fss_oracle_hprd8_archive ENABLED ACTIVE 188753992 CONCAT - RW
sd emcdsk03-01  FS_fss_oracle_hprd8_archive-02 emcdsk03 0 188753992 0 EMC_CLARiiON0_9 ENA


root@samserv02:/ >  vxassist -b -g samserv02dg mirror FS_fss_oracle_hprd8_index

root@samserv02:/ > vxassist -b -g samserv02dg mirror FS_fss_oracle_hprd8_dump EMCDSK01 EMCDSK02 EMCDSK03 EMCDSK04 EMCDSK05 EMCDSK06
root@samserv02:/ > vxtask list
TASKID  PTID TYPE/STATE    PCT   PROGRESS
   351           ATCOPY/R 00.03% 0/832569344/215040 PLXATT FS_fss_oracle_hprd8_dump FS_fss_oracle_hprd8_dump-0 samserv02dg
root@samserv02:/ > vxprint -ht | grep FS_fss_oracle_hprd8_dump
v  FS_fss_oracle_hprd8_dump - ENABLED ACTIVE  832569344 SELECT   -        fsgen
pl FS_fss_oracle_hprd8_dump-01 FS_fss_oracle_hprd8_dump ENABLED TEMPRMSD 832569696 CONCAT - WO
sd EMCDSK06-02  FS_fss_oracle_hprd8_dump-01 EMCDSK06 138923456 832569696 0 EMC_CLARiiON1_5 ENA
pl FS_fss_oracle_hprd8_dump-02 FS_fss_oracle_hprd8_dump ENABLED ACTIVE 832569696 CONCAT - RW
sd emcdsk02-04  FS_fss_oracle_hprd8_dump-02 emcdsk02 169430296 832569696 0 EMC_CLARiiON0_7 ENA


Break a mirror

In the following steps we are going to dissociate the plexes from the parent volume using vxplex 'dis'. VxVM also has vxplex 'det', which detaches a plex. Confusingly, detaching a plex with vxplex 'det' stops writes to the plex, but the plex stays associated with the parent volume.

The first step is to detach one of the plexes and break its link to the remaining plex(es) (i.e. dissociate). We'll dissociate 'plex2':

# vxplex -g baddg dis plex2
========================
Dissociate the mirror plex:
# vxplex -g oradg dis oravol-02
Attach the mirror plex back to the volume:
# vxplex -g oradg att oravol oravol-02
vxplex command to remove several plexes at once:
# vxplex -o rm dis oravol-02 FS_VOL_vol-02 Next_VOL-02
Remove a volume (unmount the volume first if mounted):
# vxassist -g ORADG remove volume oravol
===========================================
root@samserv02:/var/tmp/vxvm > vxprint -ht | egrep "FS_fss_oracle-01|FS_fss_oracle_hprd8_archive-02"
pl FS_fss_oracle-01 FS_fss_oracle ENABLED ACTIVE 134232960 CONCAT -      RW
sd emcdsk00-02  FS_fss_oracle-01 emcdsk00 1639680 29360640 0     EMC_CLARiiON0_6 ENA
sd emcdsk06-01  FS_fss_oracle-01 emcdsk06 0   104872320 29360640 EMC_CLARiiON0_10 ENA
pl FS_fss_oracle_hprd8_archive-02 FS_fss_oracle_hprd8_archive ENABLED ACTIVE 188753992 CONCAT - RW
sd emcdsk03-01  FS_fss_oracle_hprd8_archive-02 emcdsk03 0 188753992 0 EMC_CLARiiON0_9 ENA
============================================
Remove a disk from vxvm dg
root@samserv02:/var/tmp > vxdg -g samserv02dg rmdisk emcdsk03 emcdsk06 emcdsk07 emcdsk08
Remove a disk from vxvm control
root@samserv02:/var/tmp > vxdisk rm EMC_CLARiiON0_7 EMC_CLARiiON0_8 EMC_CLARiiON0_9 EMC_CLARiiON0_10


==============
# vxdg deport oradg
# vxdg import oradg
# vxdisk list
# vxprint -ht
# vxvol -g oradg start FS1 fs2 fs3 ...
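
After the import, a hedged helper to mount every volume, assuming the FS_<path> naming convention used on this host (underscores become slashes; this breaks if a mount directory itself contains an underscore):

for vol in `vxprint -g oradg -ht | awk '/^v /{print $2}'`
do
  mnt=/`echo $vol | sed -e 's/^FS_//' -e 's:_:/:g'`
  mount -F vxfs /dev/vx/dsk/oradg/$vol $mnt
done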


============================================================================

# echo | format >/var/tmp/myformat.hostname
# grep c7t5006016846E06A7Cd140 myformat.hostname # just to get size of newly added disk.
# vxdisk list
# vxdisk list device_name  # in case you need multipathing info.
# vxprint -ht | grep "^sd" | more
# vxprint -ht | grep "^sd" | grep emcdsk | more
# vxprint -ht | grep "^sd" | grep emcdsk | awk '{print $3}' | more
RP_RSSD_he1147rsr2_sq01-01
RP_RSSD_he1147rsr2_sq02-01
RP_RSSD_he1147rsr11_sq01-01
RP_RSSD_he1147rsr11_sq02-01

If you have multiple disk groups:
# vxprint -htg FSdg | grep "^sd" | grep emcdsk | awk '{print $3}' | more

The output will contain plex names only. Just save the output to a file.

To add the disk to the volume manager:
Note: If you get an error while using vxdisksetup, use the vxdiskadm command.
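
If the new LUN shows up in vxdisk list but is not yet initialized, a hedged example (device and disk names illustrative):

# /etc/vx/bin/vxdisksetup -i EMC_CLARiiON1_5
# vxdg -g FSdg adddisk EMCDSK10=EMC_CLARiiON1_5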
# cat /var/tmp/FSdg_Volume
FS_opt_ECDA15
FS_opt_ecda126
FS_opt_patrol
FS_opt_rep125EBF11480
FS_opt_rep155
FS_opt_rep156
# df -h | egrep "FS_fss_syb_he1147sr13_dmp|FS_fss_syb_he1147sr14_dmp|FS_fss_syb_he1147sr8_dmp|FS_fss_syb_he1147
Scanning for newly assigned LUNs.
# cfgadm -al
# devfsadm
# vxdctl enable
# vxdisk list
Mirroring manually
# vxassist -b -g FSdg mirror FS_fss_syb_he1147sr13_dmp EMCDSK10 EMCDSK11
# vxtask list
Mirror using the script below; use about 10 volumes at a time and do not exceed 20 concurrent jobs.
# cat vxmirror.FSdg.ksh
#!/bin/ksh
# Mirror every volume listed in ${vol_file} onto the new-leg disks.
vol_file=/var/tmp/FSdg_Volume
DSK_GRP=FSdg
DISKS="EMCDSK10 EMCDSK11 EMCDSK12"
for vol in `grep -v "^#" ${vol_file}`
do
  echo "Mirroring volume: ${vol}"
  sleep 5
  nohup vxassist -g ${DSK_GRP} mirror ${vol} ${DISKS} 1>/var/tmp/${vol}_mirr_log.txt 2>&1 &
  echo ""
done
bash-3.2#
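
A hedged monitor loop to watch the background syncs until they finish (ATCOPY tasks disappear from vxtask list as each plex attach completes):

while vxtask list | grep ATCOPY >/dev/null
do
    vxtask list
    sleep 60
done
echo "All mirror syncs complete."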

# vxprint -ht | grep "^sd" | awk '{print $3}' > /var/tmp/vxvm/vxvmplexes
FS_fss_sybase-02
FS_fss_sybase_rr2181sr1-02
FS_fss_sybase_rr2181sr12_du-02
FS_fss_sybase_rr2181sr13-02

bash-3.2# cat vxvmrmplex.sh
#!/bin/bash
# This script removes the Veritas plexes listed (one per line) in
# /var/tmp/vxvm/vxvmplexes. Tue Mar 25 10:58:27 EDT 2014
function logStatus() {
    /bin/echo "$1" >> /var/tmp/vxvm/vxvm-removedplexes.txt
}
function removePlexes() {
    for plex in `cat /var/tmp/vxvm/vxvmplexes`
    do
        sleep 1
#       vxplex -g FSdg -o rm dis $plex
# second dg, to be used later.
        vxplex -g RPdg -o rm dis $plex
        if [ $? -eq 0 ]; then
            logStatus "$plex is removed"
        else
            logStatus "$plex is NOT removed"
        fi
    done
}
removePlexes
# EOF

# mkdir /var/tmp/vxvm

===================================================

Finally remove the old disks...
 # vxdg list
NAME         STATE           ID
samserv02dg  enabled              1080584443.1077.samserv02
samserv02 # vxdisk list | grep samserv02dg
c3t50000972083B85B4d1s2 auto:sliced     emcdsk00     samserv02dg  online
c3t50000972083B85B4d2s2 auto:sliced     emcdsk01     samserv02dg  online
c3t50000972083B85B4d3s2 auto:sliced     emcdsk02     samserv02dg  online
c3t50000972083B85B4d4s2 auto:sliced     emcdsk03     samserv02dg  online
# vxdisk list | grep samserv02dg | awk '{print $3}' >mydisks.txt
emcdsk00
emcdsk01
emcdsk02
emcdsk03

bash-3.2# more rmdisk.sh
#!/bin/ksh
for i  in `cat mydisks.txt`
do
# vxdg -g diskgroup rmdisk diskname
vxdg -g FSdg rmdisk $i
done
bash-3.2#

Set up your lab with the following information.

RHCSA certification preparation


1. Install VirtualBox (or VMware) on your system for the lab.

2. Download CentOS and install 4 different instances of OS.

3. Set up YUM repo.

4. Create 4 groups and users, and assign them the proper group memberships.

5. Create a shared directory and change the group ownership. Assign ACL.

6. Write a simple shell script that records the mounted filesystems at 1:01 AM every day (see the sketch after this list).

7. Set up and configure one of the VMs as an LDAP server and the rest as LDAP clients.

8. Install, set up, and configure autofs so user home directories are mounted from the LDAP server.

9. Configure your system to sync its time with the LDAP server.

10. Configure the FTP server for anonymous download.

11. Create an LVM ext4 filesystem of 2GB mounted at /home, then resize the LV to 500M.

12. Create a swap partition of 2GB and make it available upon reboot.

13. Create a VG myvg with a PE size of 16MB and an LV mylv with a size of 50 PEs. The LV should have a vfat filesystem; mount it on the /mnt/lvm directory.

14. Configure a web server.

15. Find files created by the user harry and copy them into the /root/findfile directory.

16. Find the string "root" in /etc/passwd and copy the matching lines into /root/testfile.

17. Create the user manato with user ID 2233.

18. Install the appropriate kernel update from ftp://sam.expanor.local.
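
A minimal sketch for task 6, assuming the script lives at /usr/local/bin/record_mounts.sh and logs to /var/log/mounted_fs.log (both paths are my own choices):

# cat /usr/local/bin/record_mounts.sh
#!/bin/bash
# Append a timestamped snapshot of the mounted filesystems to the log.
echo "==== `date` ====" >> /var/log/mounted_fs.log
df -hT >> /var/log/mounted_fs.log

Schedule it for 1:01 AM daily via cron:
# crontab -e
1 1 * * * /usr/local/bin/record_mounts.sh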

LVM automatic filesystem creation

Instead of reading entries from a file, you may want an interactive version that prompts for the values.

# cat /tmp/a
# vg    FS      Size
#-------------------
datavg  WEB     30G
datavg  WEBDATA 50G
datavg  WEB_DEV        30G
datavg  WEB_DATA        30G
datavg  PKGS    20G

# cat myFS.sh
#!/bin/bash
# Kamal
# Creating FS using LVM on Linux
#
set -e
grep -v "^#" /tmp/a | while read myvg myfs mysize
do
  echo "Creating $myfs volume"
  lvcreate -L ${mysize} -n ${myfs} ${myvg}
  mkfs.ext4 /dev/${myvg}/${myfs}
done
# EOF

# cat mymounts.sh
#!/bin/bash
echo "================================================="
echo "Backing up /etc/fstab file"
cp -p /etc/fstab /etc/fstab.05.16.2014
sleep 2
echo "#################################" >>/etc/fstab
echo "/dev/datavg/WEB          /web        ext4    defaults        1 2" >>/etc/fstab
echo "/dev/datavg/WEBDATA      /webdata    ext4    defaults        1 2" >>/etc/fstab
echo "/dev/datavg/WEB_DEV      /webc/dev   ext4    defaults        1 2" >>/etc/fstab
echo "/dev/datavg/WEB_DATA     /webcdata   ext4    defaults        1 2" >>/etc/fstab
echo "/dev/datavg/PKGS         /pkgs       ext4    defaults        1 2" >>/etc/fstab
echo " "
echo "Creating mount points"
mkdir -p /web /webdata /webc/dev /webcdata /pkgs
echo " "
echo "Mounting filesystems"
sleep 2
mount -a
sleep 2
echo "Displaying currently mounted fs"
df -h /web /webdata /webc/dev /webcdata /pkgs
echo "Task completed"
echo "Now changing ownership"
chown -R edsmgr:edsgrp /web /webdata /webc/dev /webcdata /pkgs


===================================================


1. Files for this task:
 a. Script: lvm_create.sh
 b. Config file: LVM_WEBLOGIC
2. Put both files in the same directory,
or
- create two directories, bin and etc,
- put the script under the bin dir and the config under the etc dir.

# cat lvm_create.sh
#!/bin/sh
# SAM Bhusal
# Automate the FS creation task. This script was tested and verified on Red Hat Enterprise Linux 6.x.
# Config file entry eg.
# # file_name: LVM_WEBLOGIC
# # For Weblogic
# # vg    FS      Size(GB) Mnt point
# --------------------------------
# datavg  WEB     10     /web
# datavg  WEBDATA 15     /webdata
#
# Mon Feb 24 12:13:24 EST 2014
# Update: Thu Aug 14 09:55:12 EDT 2014
# Added the entry for mount point on config file.
# Added entry to the fstab.
#
if [ `/usr/bin/whoami` != "root" ]
then
        echo "You must be root to run this script"
        exit 1
fi
# Creating LVM.
/bin/cat LVM_WEBLOGIC | grep -v "^#" | while read myvg myfs mysize mymt
do
        echo " "
        # Abort if the mount point already exists.
        #/bin/df -h ${mymt}
        /bin/ls -ld ${mymt} >/dev/null 2>&1
        if [ $? -eq 0 ]; then
                echo "Mount point ${mymt} already exists."
                echo "Please review the config file."
                exit 1  # note: exits the piped subshell, not the whole script
        fi
        /sbin/lvcreate -L ${mysize}G -n ${myfs} ${myvg}
        /sbin/mkfs.ext4 /dev/${myvg}/${myfs}
        # Create mount point
        /bin/mkdir -p ${mymt}
        # Add Entry to fstab
        echo "###########################################################################" >>/etc/fstab
        echo "/dev/${myvg}/${myfs}      ${mymt} ext4    defaults        1 2" >>/etc/fstab
        # Mount the filesystem.
        /bin/mount -a
        # Verify if the filesystems were created.
        /bin/df -h      ${mymt}
        # Check the condition if the process is successful.
        if [ $? -eq 0 ]; then
                echo "Successfully Created fileystem."
                continue
        else
                echo "Failed, please review the error"
                # exit
        fi
echo " ----------------end----------------"
done
# EOF

# cat LVM_WEBLOGIC
# file_name: LVM_WEBLOGIC
# SAM Bhusal
# For Weblogic
# vg    FS       Size(GB)  mPoint
#---------------------------------------
datavg  PKGS            20      /pkgs
datavg  WWW             10      /www
datavg  WWW_DOCROOT     10      /www/docroot
datavg  WWW_SERVERS     10      /www/servers
datavg  WEB_TEST        10      /web/test
datavg  WEB_DATA        10      /webdata
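
After the run, a quick hedged verification (paths taken from the config above; note that /www must be mounted before /www/docroot and /www/servers, which the top-to-bottom order of the config already ensures):

# df -h /pkgs /www /www/docroot /www/servers /web/test /webdata
# grep datavg /etc/fstab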

Install and configure puppet on Linux (Redhat)

How To Install And Configure Puppet (Getting Puppet Up)  -- copied
Puppet is a system management tool used by many large and small enterprises to manage their infrastructure. Off the top of my head, Twitter, Wikipedia, Digg, Nokia, and Rackspace are some of the companies using it, and there is no reason you cannot use it to manage that single server you love or the entire data center you own.
Installing Puppet is not difficult, but I'll recommend installing Puppet on Fedora/EL from the tmz repo instead of the official repo. The official repo is generally out of date, while the tmz repo has the latest builds. I don't know if such a repo exists for Debian-like distributions. If you are interested in installing from source, then you should check out this page. Puppet follows a client-server architecture, so you need to run a server and a client (which can be on the same machine). Install "puppet" on the client side and "puppet-server" on the server side.
Now let us start with building the server:
Install and set up Puppet Server
1: Configure tmz repo and install puppet-server

# yum install puppet-server
2: Puppet, by default, looks for the site.pp file in the manifests directory. Let us create one, if not present already (a minimal example follows these steps).
# mkdir /etc/puppet/manifests
# touch /etc/puppet/manifests/site.pp
3: Start the puppet master (a.k.a. server) using:
# service puppetmaster start
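
A minimal hedged site.pp just to confirm the setup works (the file resource is only an illustration, not part of the original how-to):

# cat /etc/puppet/manifests/site.pp
node default {
  file { '/etc/motd':
    ensure  => file,
    content => "Managed by Puppet\n",
  }
}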
Install and set up Puppet Client
1: Tell the client where the server is by adding a server entry to the [main] section of /etc/puppet/puppet.conf:
[main]
server=puppet.aditya.pa
2: Start the puppet client
# service puppet start
Puppet client will request a certificate from master. Now let us go to the master and sign the certificate.
Run the following command on puppet master:-
# puppet cert --list
Sign the correct certificate with:
# puppet cert --sign fed1.aditya.pa
Our Puppet is up and running and ready to use. I'll build some manifests and modules to manage applications in the next post, or if you want, you can catch me at the Fedora Users and Developers Conference in Pune, India on 6 November 2011, where I'll build some manifests and modules live as part of the hackfest event.
http://blog.adityapatawari.com/2011/11/how-to-install-and-configure-puppet.html

https://blogs.oracle.com/observatory/entry/puppet_configuration_in_solaris

http://docs.oracle.com/cd/E23824_01/html/E21803/copyrepo1.html#scrolltoc

Find used IP addresses in a subnet

[kamal@sama ~]$ for ip in $(seq 1 254); do ping -c 1 192.168.10.$ip; done


[kamal@sama ~]$ nmap -n -sP 192.168.10.0/24  


Starting Nmap 5.51 ( http://nmap.org ) at 2014-05-21 22:26 EDT
Nmap scan report for 192.168.10.1
Host is up (0.00064s latency).
Nmap scan report for 192.168.10.13
Host is up (0.017s latency).
Nmap scan report for 192.168.10.14
Host is up (0.023s latency).
Nmap scan report for 192.168.10.24
Host is up (0.076s latency).
Nmap scan report for 192.168.10.110
Host is up (0.00011s latency).
Nmap done: 256 IP addresses (5 hosts up) scanned in 5.30 seconds
[kamal@sama ~]$


[kamal@sama ~]$ for ip in $(seq 1 254); do ping -c 1 192.168.10.$ip; done
PING 192.168.10.1 (192.168.10.1) 56(84) bytes of data.
64 bytes from 192.168.10.1: icmp_seq=1 ttl=64 time=0.405 ms

--- 192.168.10.1 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.405/0.405/0.405/0.000 ms
PING 192.168.10.2 (192.168.10.2) 56(84) bytes of data.
From 192.168.10.110 icmp_seq=1 Destination Host Unreachable

--- 192.168.10.2 ping statistics ---
1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 3000ms

PING 192.168.10.3 (192.168.10.3) 56(84) bytes of data.
^Z
[2]+  Stopped                 ping -c 1 192.168.10.$ip
[kamal@sama ~]$


[kamal@sama ~]$ for ip in $(seq 1 254); do ping -c 1 192.168.10.$ip>/dev/null; [ $? -eq 0 ] && echo "192.168.10.$ip UP" || : ; done
192.168.10.1 UP
192.168.10.13 UP
192.168.10.14 UP
192.168.10.17 UP
192.168.10.24 UP
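
The same loop with a one-second reply timeout (-W 1, GNU/iputils ping) so dead addresses don't stall the scan:

[kamal@sama ~]$ for ip in $(seq 1 254); do ping -c 1 -W 1 192.168.10.$ip >/dev/null 2>&1 && echo "192.168.10.$ip UP"; done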


On Windows, at the DOS prompt, type:

for /L %I in (1,1,254) DO ping -w 30 -n 1 192.168.1.%I | find "Reply"

Tuesday, May 20, 2014

Step by step guide to set up hadoop on CentOS

How to Setup Hadoop 1.2.1 on CentOS/RHEL 6/5

Preparing ... draft ..
Apache Hadoop is an open-source software framework for storage and large-scale processing of data sets on clusters of commodity hardware. Hadoop is an Apache top-level project built and used by a global community of contributors and users. Hadoop brings the ability to cheaply process large amounts of data, regardless of its structure.
Server configuration
--------------------
Pre-installation steps:
Install and set up VMware Server or VirtualBox and create the servers below.
OS: CentOS 6.5
Server specifications
It is going to be a 4-node cluster.
Assign more memory to the first node, which requires the most resources.
server1.expanor.local 2 CPU 4GB Ram  50GB disk space
server2.expanor.local 1 CPU 2GB Ram  50GB disk space
server3.expanor.local 1 CPU 2GB Ram  50GB disk space
server4.expanor.local 1 CPU 2GB Ram  50GB disk space

VM creation

Create the VM with the following parameters:
 - Bridge network
 - Enough disk space (more than 40GB)
 - 2 GB of RAM
 - Setup the DVD to point to the CentOS iso image

Network Configuration
---------------------
Make the following network configuration changes, which allow all the cluster nodes to interact.

# cat /etc/resolv.conf
search expanor.local
nameserver 192.168.10.110

# cat /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=sam.expanor.local
GATEWAY=192.168.10.1

# cat /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
ONBOOT=yes
BOOTPROTO=static
IPADDR=192.168.10.200
NETMASK=255.255.255.0
# cat /etc/selinux/config
SELINUX=disabled
# /etc/yum/pluginconf.d/fastestmirror.conf
enabled=0

Restart the network service to make the changes effective.
# chkconfig iptables off
# /etc/init.d/network restart   (or: service network restart)

Setup Cluster Hosts
If you don't have DNS set up, add the following entries to your /etc/hosts:
# cat /etc/hosts
192.168.10.201 hadoop1.expanor.local hadoop1
192.168.10.202 hadoop2.expanor.local hadoop2
192.168.10.203 hadoop3.expanor.local hadoop3
192.168.10.204 hadoop4.expanor.local hadoop4
Setup SSH
Set up SSH keys for passwordless authentication.
# yum -y install perl openssh-clients
# ssh-keygen (type enter, enter, enter)
# cd ~/.ssh
# cp id_rsa.pub authorized_keys

Set StrictHostKeyChecking to no in the ssh configuration file so that SSH will not prompt when connecting to the hosts.

# vi /etc/ssh/ssh_config
StrictHostKeyChecking no
Shutdown and Clone
Now, shutdown the system.
# init 0

Let's create the server nodes that will be members of the cluster.

In VirtualBox, clone the base server using the 'Linked Clone' option, and name the nodes hadoop1, hadoop2, hadoop3, and hadoop4.

For the first node (hadoop1), change the memory settings to 8GB of memory. Most of the roles will be
installed on this node, and therefore it is important that it have sufficient memory available.
Clones Customization

For every node, proceed with the following operations:

Modify the hostname of the server, change the following line in the file:

/etc/sysconfig/network
 HOSTNAME=hadoop[n].expanor.local

Where [n] = 1..4 (up to the number of nodes)

Modify the fixed IP address of the server, change the following line in the file:

/etc/sysconfig/network-scripts/ifcfg-eth0
IPADDR=192.168.10.20[n]
Where [n] = 1..4 (up to the number of nodes)
Let's restart the networking services and reboot the server so that the above changes take effect:
# /etc/init.d/network restart
#  init 6

Now, we have four running virtual machines with CentOS correctly configured.



===============================================================
1. Install Java

Verify whether Java is installed on your system by running java -version at the command prompt, or just type java and see whether you get output or "command not found".

Steps to install Java 7 on your Linux system (Red Hat and CentOS):
Download the java package from Oracle.
# cd /var/tmp
# wget "http://download.oracle.com/otn-pub/java/jdk/7u55-b13/jdk-7u55-linux-i586.tar.gz?AuthParam=1398049773_df113de6ac9a884bbf0b37f61c742aeb"
# tar xzf jdk-7u55-linux-i586.tar.gz
# mv jdk1.7.0_55 /opt/
# cd /opt/jdk1.7.0_55/
# alternatives --install /usr/bin/java java /opt/jdk1.7.0_55/bin/java 2
# alternatives --config java
Select the number for the new JDK entry (option 4 in this case).
Verify java version
# java -version
Setup Environment Variables
# export JAVA_HOME=/opt/jdk1.7.0_55
Setup JRE_HOME Variable
# export JRE_HOME=/opt/jdk1.7.0_55/jre
Setup PATH Variable
# export PATH=$PATH:/opt/jdk1.7.0_55/bin:/opt/jdk1.7.0_55/jre/bin
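
To make these settings persist across logins, one option is a profile script (a sketch; the file name /etc/profile.d/java.sh is my own choice):

# cat /etc/profile.d/java.sh
export JAVA_HOME=/opt/jdk1.7.0_55
export JRE_HOME=$JAVA_HOME/jre
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin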

2. Create User Account

Create a user account for hadoop installation.
# useradd hadoop
# passwd hadoop
3. Configuring Key Based authentication (passwordless login).
# su - hadoop
$ ssh-keygen -t rsa
$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
$ chmod 0600 ~/.ssh/authorized_keys
$ exit
Edit .bash_profile so the Java environment loads upon login.
# vi .bash_profile
export JAVA_HOME=/usr/java/jdk1.7.0_45
PATH=$PATH:$HOME/bin:/usr/java/jdk1.7.0_45/bin
export PATH
4. Download and Extract the file.
# mkdir /opt/hadoop; cd /opt/hadoop/
# wget http://apache.mesi.com.ar/hadoop/common/hadoop-1.2.1/hadoop-1.2.1.tar.gz
# tar -xzf hadoop-1.2.1.tar.gz
# mv hadoop-1.2.1 hadoop
# chown -R hadoop /opt/hadoop
# cd /opt/hadoop/hadoop/

5: Configure Hadoop
a. Edit core-site.xml
# vi conf/core-site.xml
# Add the following inside the <configuration> tag
<property>
    <name>fs.default.name</name>
    <value>hdfs://localhost:9000/</value>
</property>
<property>
    <name>dfs.permissions</name>
    <value>false</value>
</property>
:wq!

b. Edit hdfs-site.xml

# vi conf/hdfs-site.xml
 # Add the following inside the configuration tag
<property>
    <name>dfs.data.dir</name>
    <value>/opt/hadoop/hadoop/dfs/name/data</value>
    <final>true</final>
</property>
<property>
    <name>dfs.name.dir</name>
    <value>/opt/hadoop/hadoop/dfs/name</value>
    <final>true</final>
</property>
<property>
    <name>dfs.replication</name>
    <value>2</value>
</property>
c. Edit mapred-site.xml
# vi conf/mapred-site.xml
 # Add the following inside the configuration tag
<property>
    <name>mapred.job.tracker</name>
    <value>localhost:9001</value>
</property>
d. Edit hadoop-env.sh
# vim conf/hadoop-env.sh
export JAVA_HOME=/opt/jdk1.7.0_55
export HADOOP_OPTS=-Djava.net.preferIPv4Stack=true

Next, format the NameNode:
# su - hadoop
$ cd /opt/hadoop/hadoop
$ bin/hadoop namenode -format

6: Start Hadoop Services
$ bin/start-all.sh
7: Test and Access Hadoop Services
Use the 'jps' command to check whether all services started well.
$ jps
or
$ $JAVA_HOME/bin/jps
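
With a full JDK on the PATH, jps should list the five Hadoop 1.x daemons plus itself, each line prefixed with its PID, roughly:

NameNode
SecondaryNameNode
DataNode
JobTracker
TaskTracker
Jps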
Web Access URLs for Services
  http://srv1.tecadmin.net:50030/   for the Jobtracker
  http://srv1.tecadmin.net:50070/   for the Namenode
  http://srv1.tecadmin.net:50060/   for the Tasktracker

8: Stop Hadoop Services
# bin/stop-all.sh

source
http://en.wikipedia.org/wiki/Apache_Hadoop
http://tecadmin.net/steps-to-install-hadoop-on-centosrhel-6/
https://blog.cloudera.com/blog/2014/01/how-to-create-a-simple-hadoop-cluster-with-virtualbox/
http://solutionsatexperts.com/hadoop-installation-steps-on-centos-6/
http://hortonworks.com/blog/set-up-apache-hadoop-in-minutes-with-rpms/
not used
http://icanhadoop.blogspot.com/2012/09/configuring-hadoop-is-very-if-you-just.html
http://gbif.blogspot.com/2011/01/setting-up-hadoop-cluster-part-1-manual.html
https://blog.codecentric.de/en/2012/12/tutorial-installing-a-apache-hadoop-single-node-cluster-with-hortonworks-data-platform/

========================
Detail
========================


[root@sama ~]# ls -l jre-7u55-linux-i586.rpm\?AuthParam\=1400640097_944834eac90eb39afbab6dec970e6473\&GroupName\=JSC\&FilePath\=%2FESD6%2FJSCDL%2Fjdk%2F7u55-b13%2Fjre-7u55-linux-i586.rpm\&File\=jre-7u55-linux-i586.rpm\&BHost\=javadl.sun.com
-rw-r--r--. 1 root root 33040762 Apr 22 11:44 jre-7u55-linux-i586.rpm?AuthParam=1400640097_944834eac90eb39afbab6dec970e6473&GroupName=JSC&FilePath=%2FESD6%2FJSCDL%2Fjdk%2F7u55-b13%2Fjre-7u55-linux-i586.rpm&File=jre-7u55-linux-i586.rpm&BHost=javadl.sun.com
[root@sama ~]# mv jre-7u55-linux-i586.rpm\?AuthParam\=1400640097_944834eac90eb39afbab6dec970e6473\&GroupName\=JSC\&FilePath\=%2FESD6%2FJSCDL%2Fjdk%2F7u55-b13%2Fjre-7u55-linux-i586.rpm\&File\=jre-7u55-linux-i586.rpm\&BHost\=javadl.sun.com  jre-7u55-linux-i586.rpm
[root@sama ~]# file jre-7u55-linux-i586.rpm
jre-7u55-linux-i586.rpm: RPM v3.0 bin i386/x86_64
[root@sama ~]# rpm -ql jre-7u55-linux-i586.rpm | more
package jre-7u55-linux-i586.rpm is not installed
[root@sama ~]# rpm -ivh jre-7u55-linux-i586.rpm
Preparing...                ########################################### [100%]
   1:jre                    ########################################### [100%]
Unpacking JAR files...
        rt.jar...
        jsse.jar...
        charsets.jar...
        localedata.jar...
        jfxrt.jar...
        plugin.jar...
        javaws.jar...
        deploy.jar...
[root@sama ~]# which java
/usr/bin/java
[root@sama ~]# rpm -qf /usr/bin/java
file /usr/bin/java is not owned by any package
[root@sama ~]# which /usr/bin/java
/usr/bin/java
[root@sama ~]# java -version
java version "1.6.0_20"
OpenJDK Runtime Environment (IcedTea6 1.9.7) (rhel-1.39.1.9.7.el6-i386)
OpenJDK Client VM (build 19.0-b09, mixed mode)
[root@sama ~]# echo $JAVA_HOME

[root@sama ~]# cd /usr/local/
[root@sama local]# ls
bin  etc  games  include  lib  libexec  sbin  share  src
[root@sama local]# cd bin/
[root@sama bin]# ls
noip2
[root@sama bin]# cd ..
[root@sama local]# pwd
/usr/local
[root@sama local]# rpm -qa | grpe -i java
-bash: grpe: command not found
^C[root@sama local]# rpm -qa | grep -i java
tzdata-java-2011g-1.el6.noarch
java-1.6.0-openjdk-1.6.0.0-1.39.1.9.7.el6.i686
[root@sama local]# rpm -qf java-1.6.0-openjdk-1.6.0.0-1.39.1.9.7.el6.i686 | more
error: file /usr/local/java-1.6.0-openjdk-1.6.0.0-1.39.1.9.7.el6.i686: No such file or directory
[root@sama local]# cd  /usr/local/
[root@sama local]# la
-bash: la: command not found
[root@sama local]# ls
bin  etc  games  include  lib  libexec  sbin  share  src
[root@sama local]# cd bin
[root@sama bin]# ls
noip2
[root@sama bin]# cd ../etc
[root@sama etc]# ls
NO-IPxCB4Bc
[root@sama etc]# cd ..
[root@sama local]# pwd
/usr/local
[root@sama local]# cd /usr/
[root@sama usr]# cd java
[root@sama java]# ls
default  jre1.7.0_55  latest
[root@sama java]# pwd
/usr/java
[root@sama java]# pwd
/usr/java
[root@sama java]# ls -ltr
total 4
drwxr-xr-x. 6 root root 4096 May 20 22:52 jre1.7.0_55
lrwxrwxrwx. 1 root root   21 May 20 22:52 latest -> /usr/java/jre1.7.0_55
lrwxrwxrwx. 1 root root   16 May 20 22:52 default -> /usr/java/latest
[root@sama java]# java
Usage: java [-options] class [args...]
           (to execute a class)
   or  java [-options] -jar jarfile [args...]
           (to execute a jar file)
where options include:
    -d32          use a 32-bit data model if available
    -d64          use a 64-bit data model if available
    -client       to select the "client" VM
    -server       to select the "server" VM
    -hotspot      is a synonym for the "client" VM  [deprecated]
                  The default VM is client.

    -cp <class search path of directories and zip/jar files>
    -classpath <class search path of directories and zip/jar files>
                  A : separated list of directories, JAR archives,
                  and ZIP archives to search for class files.
    -D<name>=<value>
                  set a system property
    -verbose[:class|gc|jni]
                  enable verbose output
    -version      print product version and exit
    -version:<value>
                  require the specified version to run
    -showversion  print product version and continue
    -jre-restrict-search | -jre-no-restrict-search
                  include/exclude user private JREs in the version search
    -? -help      print this help message
    -X            print help on non-standard options
    -ea[:<packagename>...|:<classname>]
    -enableassertions[:<packagename>...|:<classname>]
                  enable assertions with specified granularity
    -da[:<packagename>...|:<classname>]
    -disableassertions[:<packagename>...|:<classname>]
                  disable assertions with specified granularity
    -esa | -enablesystemassertions
                  enable system assertions
    -dsa | -disablesystemassertions
                  disable system assertions
    -agentlib:<libname>[=<options>]
                  load native agent library <libname>, e.g. -agentlib:hprof
                  see also, -agentlib:jdwp=help and -agentlib:hprof=help
    -agentpath:<pathname>[=<options>]
                  load native agent library by full pathname
    -javaagent:<jarpath>[=<options>]
                  load Java programming language agent, see java.lang.instrument
    -splash:<imagepath>
                  show splash screen with specified image
See http://java.sun.com/javase/reference for more details.
[root@sama java]# export JAVA_HOME=/usr/java
[root@sama java]# pwd
/usr/java
[root@sama java]# ls
default  jre1.7.0_55  latest
[root@sama java]# cd latest
[root@sama latest]# ls
bin        man      THIRDPARTYLICENSEREADME-JAVAFX.txt
COPYRIGHT  plugin   THIRDPARTYLICENSEREADME.txt
lib        README   Welcome.html
LICENSE    release
[root@sama latest]# cd bin
[root@sama bin]# ls
ControlPanel  java_vm  jcontrol  orbd     policytool  rmiregistry  tnameserv
java          javaws   keytool   pack200  rmid        servertool   unpack200
[root@sama bin]# pwd
/usr/java/latest/bin
[root@sama bin]# export PATH=$PATH:/usr/java/latest/bin
[root@sama bin]# useradd hadoop
[root@sama bin]# passwd hadoop
Changing password for user hadoop.
New password:
BAD PASSWORD: it is based on a dictionary word
Retype new password:
passwd: all authentication tokens updated successfully.
[root@sama bin]# su - hadoop
[hadoop@sama ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
d4:0d:37:5d:55:73:47:5d:92:5e:64:38:01:34:01:16 hadoop@sama.expanor.local
The key's randomart image is:
+--[ RSA 2048]----+
|          E+B+oO/|
|         o + o=o*|
|        . . .. o |
|       .      .  |
|        S        |
|                 |
|                 |
|                 |
|                 |
+-----------------+
[hadoop@sama ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys^C
[hadoop@sama ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@sama ~]$ chmod 0600 ~/.ssh/authorized_keys
[hadoop@sama ~]$ exit
logout
[root@sama bin]# ssh hadoop@localhost

 ####################################################################
 ####################################################################
 ####################################################################
 #####                                                          #####
 #####                                                          #####
 #####      WARNING** This computer system is Expanor, LLC      #####
 #####      property, and is to be used only by authorized      #####
 #####      users.  Misuse  of this computer system  is  a      #####
 #####      violation  of Federal law. All users  of  this      #####
 #####      system, whether authorized or not, are subject      #####
 #####      to monitoring  by system personnel and  by law      #####
 #####      enforcement officials. Anyone using this system     #####
 #####      expressly consents to such monitoring. Evidence     #####
 #####      of criminal activity or other misconduct may be     #####
 #####      provided   to   law   enforcement   officials.      #####
 #####      Electronic messages(e-mail) on this system are      #####
 #####      Expanor, LLC property. The Expanor may access       #####
 #####      these messages whenever such access serves a        #####
 #####      legitimate purpose.                                 #####
 #####                                                          #####
 ####################################################################
 ####################################################################
 ####################################################################
hadoop@localhost's password:

[root@sama bin]# su - hadoop
[hadoop@sama ~]$ pwd
/home/hadoop
[hadoop@sama ~]$ ls
[hadoop@sama ~]$ cd .ssh
[hadoop@sama .ssh]$ ls -ltr
total 12
-rw-------. 1 hadoop hadoop 1675 May 20 23:02 id_rsa
-rw-r--r--. 1 hadoop hadoop  407 May 20 23:02 id_rsa.pub
-rw-------. 1 hadoop hadoop  407 May 20 23:02 authorized_keys
[hadoop@sama .ssh]$ vi vi .bash_profile ^C
[hadoop@sama .ssh]$ cd ..
[hadoop@sama ~]$ vi .bash_profile
[hadoop@sama ~]$ cd mkdir /opt/hadoop; cd /opt/hadoop/^C
[hadoop@sama ~]$ mkdir /opt/hadoop; cd /opt/hadoop/
mkdir: cannot create directory `/opt/hadoop': Permission denied
-bash: cd: /opt/hadoop/: No such file or directory
[hadoop@sama ~]$ logout
[root@sama bin]# mkdir /opt/hadoop; cd /opt/hadoop/
[root@sama hadoop]# wget http://apache.mesi.com.ar/hadoop/common/hadoop-1.2.1/hadoop-1.2.1.tar.gz
--2014-05-20 23:05:56--  http://apache.mesi.com.ar/hadoop/common/hadoop-1.2.1/hadoop-1.2.1.tar.gz
Resolving apache.mesi.com.ar... 64.95.245.79
Connecting to apache.mesi.com.ar|64.95.245.79|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 63851630 (61M) [application/x-gzip]
Saving to: “hadoop-1.2.1.tar.gz”

100%[======================================>] 63,851,630  2.16M/s   in 29s

2014-05-20 23:06:25 (2.09 MB/s) - “hadoop-1.2.1.tar.gz” saved [63851630/63851630]

[root@sama hadoop]# cat ~hadoop/.bash_profile
# .bash_profile

# Get the aliases and functions
if [ -f ~/.bashrc ]; then
        . ~/.bashrc
fi

# User specific environment and startup programs

PATH=$PATH:$HOME/bin

export PATH
Export JAVA_HOME=/usr/java/latest/
export PATH=$PATH:/usr/java/latest/bin

[root@sama hadoop]# tar -xzf hadoop-1.2.1.tar.gz
[root@sama hadoop]# mv hadoop-1.2.1 hadoop
[root@sama hadoop]# chown -R hadoop /opt/hadoop
[root@sama hadoop]# cd /opt/hadoop/hadoop/
[root@sama hadoop]# vi conf/core-site.xml
[root@sama hadoop]# cp -i   conf/core-site.xml conf/core-site.xml.bk
[root@sama hadoop]# vi conf/core-site.xml
[root@sama hadoop]# cp -p conf/hdfs-site.xml conf/hdfs-site.xml.bk
[root@sama hadoop]# vi conf/hdfs-site.xml
[root@sama hadoop]# cp -p conf/mapred-site.xml conf/mapred-site.xml.bk
[root@sama hadoop]# vi conf/mapred-site.xml
[root@sama hadoop]# cp -p conf/hadoop-env.sh conf/hadoop-env.sh.bk
[root@sama hadoop]# vi conf/hadoop-env.sh
[root@sama hadoop]# su - hadoop
-bash: Export: command not found
[hadoop@sama ~]$ vi .bash_profile
[hadoop@sama ~]$ logout
[root@sama hadoop]# su - hadoop
[hadoop@sama ~]$ cd /opt/hadoop/hadoop
[hadoop@sama hadoop]$  bin/hadoop namenode -format
14/05/20 23:14:27 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = sama.expanor.local/192.168.10.110
STARTUP_MSG:   args = [-format]
STARTUP_MSG:   version = 1.2.1
STARTUP_MSG:   build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.2 -r 1503152; compiled by 'mattf' on Mon Jul 22 15:23:09 PDT 2013
STARTUP_MSG:   java = 1.7.0_55
************************************************************/
14/05/20 23:14:28 INFO util.GSet: Computing capacity for map BlocksMap
14/05/20 23:14:28 INFO util.GSet: VM type       = 32-bit
14/05/20 23:14:28 INFO util.GSet: 2.0% max memory = 1013645312
14/05/20 23:14:28 INFO util.GSet: capacity      = 2^22 = 4194304 entries
14/05/20 23:14:28 INFO util.GSet: recommended=4194304, actual=4194304
14/05/20 23:14:28 INFO namenode.FSNamesystem: fsOwner=hadoop
14/05/20 23:14:29 INFO namenode.FSNamesystem: supergroup=supergroup
14/05/20 23:14:29 INFO namenode.FSNamesystem: isPermissionEnabled=true
14/05/20 23:14:29 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100
14/05/20 23:14:29 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)
14/05/20 23:14:29 INFO namenode.FSEditLog: dfs.namenode.edits.toleration.length = 0
14/05/20 23:14:29 INFO namenode.NameNode: Caching file names occuring more than 10 times
14/05/20 23:14:29 INFO common.Storage: Image file /opt/hadoop/hadoop/dfs/name/current/fsimage of size 112 bytes saved in 0 seconds.
14/05/20 23:14:29 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/opt/hadoop/hadoop/dfs/name/current/edits
14/05/20 23:14:29 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/opt/hadoop/hadoop/dfs/name/current/edits
14/05/20 23:14:30 INFO common.Storage: Storage directory /opt/hadoop/hadoop/dfs/name has been successfully formatted.
14/05/20 23:14:30 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at sama.expanor.local/192.168.10.110
************************************************************/
[hadoop@sama hadoop]$ pwd
/opt/hadoop/hadoop
[hadoop@sama hadoop]$ bin/start-all.sh
starting namenode, logging to /opt/hadoop/hadoop/libexec/../logs/hadoop-hadoop-namenode-sama.expanor.local.out
The authenticity of host 'localhost (::1)' can't be established.
RSA key fingerprint is c4:dd:1b:00:b0:91:28:b4:83:14:0d:55:be:8f:4f:0a.
Are you sure you want to continue connecting (yes/no)? yes
localhost: Warning: Permanently added 'localhost' (RSA) to the list of known hosts.
localhost:  [Expanor warning banner, as above]
localhost: starting datanode, logging to /opt/hadoop/hadoop/libexec/../logs/hadoop-hadoop-datanode-sama.expanor.local.out
localhost:  [Expanor warning banner, as above]
localhost: starting secondarynamenode, logging to /opt/hadoop/hadoop/libexec/../logs/hadoop-hadoop-secondarynamenode-sama.expanor.local.out
starting jobtracker, logging to /opt/hadoop/hadoop/libexec/../logs/hadoop-hadoop-jobtracker-sama.expanor.local.out
localhost:  [Expanor warning banner, as above]
localhost: starting tasktracker, logging to /opt/hadoop/hadoop/libexec/../logs/hadoop-hadoop-tasktracker-sama.expanor.local.out
[hadoop@sama hadoop]$ jps
-bash: jps: command not found
[hadoop@sama hadoop]$ cat /opt/hadoop/hadoop/libexec/../logs/hadoop-hadoop-tasktracker-sama.expanor.local.out
ulimit -a for user hadoop
core file size          (blocks, -c) 0
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 16061
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 1024
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 10240
cpu time               (seconds, -t) unlimited
max user processes              (-u) 1024
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited
[hadoop@sama hadoop]$ $JAVA_HOME/bin/jps
-bash: /usr/java/latest//bin/jps: No such file or directory
[hadoop@sama hadoop]$
(Note: jps ships with the JDK, not with the JRE installed above, which is why it is not found; install the full JDK or verify via the web URLs below.)

http://sama.expanor.local:50030/

http://sama.expanor.local:50030/jobtracker.jsp
http://sama.expanor.local:50070/dfshealth.jsp
http://sama.expanor.local:50060/tasktracker.jsp