Tuesday, November 28, 2017

Puppet:- ntp-deploy-through-inheritance....

Create a module named best
[root@pserver manifests]# pwd
/etc/puppet/modules/best/manifests
[root@pserver manifests]#

[root@pserver modules]# mkdir best
[root@pserver modules]# cd best
[root@pserver best]# ls
[root@pserver best]# pwd
/etc/puppet/modules/best
[root@pserver best]# mkdir {files,manifests}
[root@pserver best]# ls
files  manifests
[root@pserver best]# cd manifests/
[root@pserver manifests]# pwd
/etc/puppet/modules/best/manifests
[root@pserver manifests]# vi ntp.pp

class best::ntp {
        package {'ntp':
        ensure => installed,
        }

        service {'ntpd':
        ensure => running,
        require => Package['ntp'],
        }

        file {'/etc/ntp.conf':
        source => 'puppet:///modules/best/ntp.conf',
        notify => Service['ntpd'],
        require => Package['ntp'],
        }
}

[root@pserver manifests]# pwd
/etc/puppet/modules/best/manifests
[root@pserver manifests]# cd ../files/
[root@pserver files]# ls
[root@pserver files]# vi ../manifests/ntp_uk.pp

class best::ntp_uk inherits best::ntp {

        File['/etc/ntp.conf'] {
                source => 'puppet:///modules/best/ntp_uk.conf',
        }
}

[root@pserver files]# pwd
/etc/puppet/modules/best/files


[root@pserver files]# cd ../../../manifests/
[root@pserver manifests]# ls
site.pp
[root@pserver manifests]# vi site.pp

#include ntp
node 'node1.example.com' {
include samba
include best::ntp_uk
#include ntp
#class {'vsftpd':
#       version => '3.0.2-9',
#}
}

node 'node2.example.com' {
include httpd
}

[root@pserver manifests]# cd -
/etc/puppet/modules/best/files
[root@pserver files]# pwd
/etc/puppet/modules/best/files
[root@pserver files]# ls
ntp_uk.conf
[root@pserver files]# more ntp_uk.conf
server 192.168.10.200
[root@pserver files]# cd ../manifests/
[root@pserver manifests]# ls
ntp.pp  ntp_uk.pp


[root@pserver manifests]# more ntp_uk.pp
class best::ntp_uk inherits best::ntp {

        File['/etc/ntp.conf'] {
                source => 'puppet:///modules/best/ntp_uk.conf',
        }
}



go to client and run


[root@node1 ~]# puppet agent -t
Info: Retrieving pluginfacts
Info: Retrieving plugin
Info: Caching catalog for node1.example.com
Info: Applying configuration version '1511822953'
Notice: /Stage[main]/Best::Ntp/File[/etc/ntp.conf]/content:
--- /etc/ntp.conf       2014-02-11 12:18:28.000000000 -0500
+++ /tmp/puppet-file20171127-6018-86t8qm        2017-11-27 17:49:23.841400464 -0500
@@ -1,58 +1 @@
-# For more information about this file, see the man pages
-# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
-
-driftfile /var/lib/ntp/drift
-
-# Permit time synchronization with our time source, but do not
-# permit the source to query or modify the service on this system.
-restrict default nomodify notrap nopeer noquery
-
-# Permit all access over the loopback interface.  This could
-# be tightened as well, but to do so would effect some of
-# the administrative functions.
-restrict 127.0.0.1
-restrict ::1
-
-# Hosts on local network are less restricted.
-#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
-
-# Use public servers from the pool.ntp.org project.
-# Please consider joining the pool (http://www.pool.ntp.org/join.html).
-server 0.rhel.pool.ntp.org iburst
-server 1.rhel.pool.ntp.org iburst
-server 2.rhel.pool.ntp.org iburst
-server 3.rhel.pool.ntp.org iburst
-
-#broadcast 192.168.1.255 autokey       # broadcast server
-#broadcastclient                       # broadcast client
-#broadcast 224.0.1.1 autokey           # multicast server
-#multicastclient 224.0.1.1             # multicast client
-#manycastserver 239.255.254.254                # manycast server
-#manycastclient 239.255.254.254 autokey # manycast client
-
-# Enable public key cryptography.
-#crypto
-
-includefile /etc/ntp/crypto/pw
-
-# Key file containing the keys and key identifiers used when operating
-# with symmetric key cryptography.
-keys /etc/ntp/keys
-
-# Specify the key identifiers which are trusted.
-#trustedkey 4 8 42
-
-# Specify the key identifier to use with the ntpdc utility.
-#requestkey 8
-
-# Specify the key identifier to use with the ntpq utility.
-#controlkey 8
-
-# Enable writing of statistics records.
-#statistics clockstats cryptostats loopstats peerstats
-
-# Disable the monitoring facility to prevent amplification attacks using ntpdc
-# monlist command when default restrict does not include the noquery flag. See
-# CVE-2013-5211 for more details.
-# Note: Monitoring will not be disabled with the limited restriction flag.
-disable monitor
+server 192.168.10.200

Info: Computing checksum on file /etc/ntp.conf
Info: /Stage[main]/Best::Ntp/File[/etc/ntp.conf]: Filebucketed /etc/ntp.conf to puppet with sum 913                                                                                                                                          c85f0fde85f83c2d6c030ecf259e9
Notice: /Stage[main]/Best::Ntp/File[/etc/ntp.conf]/content: content changed '{md5}913c85f0fde85f83c                                                                                                                                          2d6c030ecf259e9' to '{md5}489b4442e80b2fbcec12b167d15a63b9'
Info: /Stage[main]/Best::Ntp/File[/etc/ntp.conf]: Scheduling refresh of Service[ntpd]
Notice: /Stage[main]/Best::Ntp/Service[ntpd]: Triggered 'refresh' from 1 events
Notice: Finished catalog run in 0.42 seconds
[root@node1 ~]# ntpq -q
/usr/sbin/ntpq: illegal option -- q
ntpq - standard NTP query program - Ver. 4.2.6p5
USAGE:  ntpq [ -<flag> [<val>] | --<name>[{=| }<val>] ]... [ host ...]
(AutoOpts bug):  could not locate the 'help' option.
[root@node1 ~]# ntpq -p
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
 pserver.example .INIT.          16 u    -   64    0    0.000    0.000   0.000
[root@node1 ~]#


Sunday, November 26, 2017

Puppet:- httpd deploy

Puppet:- httpd deploy

Web server (httpd) deployment through puppet

On server, create a module httpd
[root@pserver manifests]# pwd
/etc/puppet/manifests
[root@pserver manifests]# cd ../modules/
[root@pserver modules]# ls
httpd  samba
[root@pserver modules]# cd httpd/
[root@pserver httpd]# ls
files  manifests
[root@pserver httpd]# cd files/
[root@pserver files]# ls
httpd.conf  index.html
[root@pserver files]# pwd
/etc/puppet/modules/httpd/files
[root@pserver files]# cd ../manifests/
[root@pserver manifests]# ls
init.pp
[root@pserver manifests]# cat init.pp
class httpd {
package {'httpd':
        ensure => present,
        before => Service['httpd'],
}
        file {'conffile':
        ensure => file,
        path => '/etc/httpd/conf.d/httpd.conf',
        #source => '/etc/puppet/modules/httpd/files/httpd.conf',
        source => "puppet:///modules/httpd/httpd.conf",
        require => Package['httpd'],
        notify => Service['httpd'],
}
        file{'htmlfile':
        ensure => file,
        path => '/var/www/html/index.html',
        source => 'puppet:///modules/httpd/index.html',
        require => Package['httpd'],
}
service {'httpd':
        ensure => running,
        enable => true,
}
 }
[root@pserver manifests]# pwd
/etc/puppet/modules/httpd/manifests
[root@pserver manifests]# more /etc/puppet/puppet.conf
[main]
certname=pserver.example.com
    # The Puppet log directory.
    # The default value is '$vardir/log'.
    logdir = /var/log/puppet

    # Where Puppet PID files are kept.
    # The default value is '$vardir/run'.
    rundir = /var/run/puppet

    # Where SSL certificates are kept.
    # The default value is '$confdir/ssl'.
    ssldir = $vardir/ssl

[agent]
    # The file in which puppetd stores a list of the classes
    # associated with the retrieved configuratiion.  Can be loaded in
    # the separate ``puppet`` executable using the ``--loadclasses``
    # option.
    # The default value is '$confdir/classes.txt'.
    classfile = $vardir/classes.txt

    # Where puppetd caches the local configuration.  An
    # extension indicating the cache format is added automatically.
    # The default value is '$confdir/localconfig'.
    localconfig = $vardir/localconfig
[root@pserver manifests]#
[root@pserver manifests]# cd /etc/puppet/manifests
[root@pserver manifests]# cat site.pp
include httpd


[root@pserver httpd]# ls
files  manifests
[root@pserver httpd]# cd files
[root@pserver files]# ls
httpd.conf  index.html
[root@pserver files]# cat index.html
<h1> This is my puppet test file. </h1>
[root@pserver files]# cat httpd.conf
<virtualhost *:80>
servername pserver.example.com
DocumentRoot /var/www/html
</virtualhost>
[root@pserver files]#


Now, go to agent and run the puppet agent -t command

[root@node1 ~]# rpm -qa | grep -i httpd
[root@node1 ~]# puppet agent -t
Info: Retrieving pluginfacts
Info: Retrieving plugin
Info: Caching catalog for node1.example.com
Info: Applying configuration version '1511743302'
Notice: /Stage[main]/Httpd/Package[httpd]/ensure: created
Notice: /Stage[main]/Httpd/File[htmlfile]/content:
--- /var/www/html/index.html    2017-11-25 18:29:08.961053523 -0500
+++ /tmp/puppet-file20171126-14861-1bq6tj0      2017-11-26 19:41:44.797668250 -0500
@@ -1 +1 @@
-<h1>This is my puppet test.</h1>
+<h1> This is my puppet test file. </h1>

Info: Computing checksum on file /var/www/html/index.html
Info: /Stage[main]/Httpd/File[htmlfile]: Filebucketed /var/www/html/index.html to puppet with sum 4e50ac7cea7d16f3e5dfd938e9f5bd23
Notice: /Stage[main]/Httpd/File[htmlfile]/content: content changed '{md5}4e50ac7cea7d16f3e5dfd938e9f5bd23' to '{md5}f2afaa35c9d79f70c0c8569e3ad50bcc'
Notice: /Stage[main]/Httpd/File[conffile]/content:
--- /etc/httpd/conf.d/httpd.conf        2017-11-25 18:29:08.996053335 -0500
+++ /tmp/puppet-file20171126-14861-1vw56pm      2017-11-26 19:41:45.017667181 -0500
@@ -1,4 +1,4 @@
-<VirtualHost *:80>
-Servername pserver.example.com
+<virtualhost *:80>
+servername pserver.example.com
 DocumentRoot /var/www/html
-</VirtualHost>
+</virtualhost>

Info: Computing checksum on file /etc/httpd/conf.d/httpd.conf
Info: /Stage[main]/Httpd/File[conffile]: Filebucketed /etc/httpd/conf.d/httpd.conf to puppet with sum 1c286cf8c917e3966fdb2f7aa1936be1
Notice: /Stage[main]/Httpd/File[conffile]/content: content changed '{md5}1c286cf8c917e3966fdb2f7aa1936be1' to '{md5}8696bcdea55fdfd6036af2730cc538aa'
Info: /Stage[main]/Httpd/File[conffile]: Scheduling refresh of Service[httpd]
Notice: /Stage[main]/Httpd/Service[httpd]/ensure: ensure changed 'stopped' to 'running'
Info: /Stage[main]/Httpd/Service[httpd]: Unscheduling refresh on Service[httpd]
Notice: Finished catalog run in 2.97 seconds
[root@node1 ~]# systemctl status httpd
httpd.service - The Apache HTTP Server
   Loaded: loaded (/usr/lib/systemd/system/httpd.service; enabled)
   Active: active (running) since Sun 2017-11-26 19:41:45 EST; 11s ago
 Main PID: 15046 (httpd)
   Status: "Total requests: 0; Current requests/sec: 0; Current traffic:   0 B/sec"
   CGroup: /system.slice/httpd.service
           ├─15046 /usr/sbin/httpd -DFOREGROUND
           ├─15047 /usr/sbin/httpd -DFOREGROUND
           ├─15048 /usr/sbin/httpd -DFOREGROUND
           ├─15049 /usr/sbin/httpd -DFOREGROUND
           ├─15050 /usr/sbin/httpd -DFOREGROUND
           └─15051 /usr/sbin/httpd -DFOREGROUND

Nov 26 19:41:45 node1.example.com systemd[1]: Started The Apache HTTP Server.
[root@node1 ~]#


if you want to specify specific host, edit the site.pp file under manifest.

[root@pserver manifests]# cat site.pp
node 'node1.example.com' {
include samba
}

node 'node2.example.com' {
include httpd
}
[root@pserver manifests]#

Puppet:- Samba deployment

Puppet:- Samba deployment

[root@pserver manifests]# cat init.pp
class samba {
        package {'samba':
                ensure => 'present',
        }
        package {'samba-client':
                ensure => 'present',
        }
#       file {'smb.conf':
#               path    => '/etc/samba/smb.conf',
#               source  => 'puppet:///modules/samba/smb.conf',
#               require => Package['samba'],
#               subscribe => Service['smb'],
#       }

        user {'jay':
                ensure => 'present',
                managehome => 'true',
                require => Package['samba'],
        }

#       exec {'set smb passwd for jay':
#               command => '/bin/echo -e "redhat\nredhat" | /usr/bin/smbpasswd -s -a jay',
##              require => Package['samba-client'],
#       }
        service {'smb':
                ensure => 'running',
#               enable => 'true',
        }
}
[root@pserver manifests]# pwd
/etc/puppet/modules/samba/manifests
[root@pserver manifests]# cd ../../../manifests/
[root@pserver manifests]# cat site.pp
include httpd
include samba
[root@pserver manifests]#


go to agent and run

[root@node2 puppet]# puppet agent -t
Info: Retrieving pluginfacts
Info: Retrieving plugin
Info: Caching catalog for node2.example.com
Info: Applying configuration version '1511746320'
Notice: /Stage[main]/Samba/User[jay]/ensure: created
Notice: Finished catalog run in 0.36 seconds
[root@node2 puppet]# id jay
uid=2002(jay) gid=2002(jay) groups=2002(jay)
[root@node2 puppet]# ls -ld /home/jay
drwx------. 3 jay jay 4096 Nov 26 20:32 /home/jay
[root@node2 puppet]#


[root@node2 puppet]# systemctl status smb
smb.service - Samba SMB Daemon
   Loaded: loaded (/usr/lib/systemd/system/smb.service; disabled)
   Active: active (running) since Sun 2017-11-26 20:30:07 EST; 14s ago
 Main PID: 18255 (smbd)
   Status: "smbd: ready to serve connections..."
   CGroup: /system.slice/smb.service
           ├─18255 /usr/sbin/smbd
           └─18257 /usr/sbin/smbd

Nov 26 20:30:07 node2.example.com smbd[18255]: [2017/11/26 20:30:07.892874,  0] ../lib/u...y)
Nov 26 20:30:07 node2.example.com systemd[1]: Started Samba SMB Daemon.
Hint: Some lines were ellipsized, use -l to show in full.


if you want to specify specific host, edit the site.pp file under manifest.

[root@pserver manifests]# cat site.pp
node 'node1.example.com' {
include samba
}

node 'node2.example.com' {
include httpd
}
[root@pserver manifests]#

Tuesday, September 5, 2017

Puppet: motd deployment on puppet enterprise 2016

motd deployment on puppet enterprise 2016

create class
create module

Create motd class

1. Create an module environment
[root@devpup]# cd /etc/puppetlabs/code/environments/production/modules

2. Create a module
[root@devpup]# mkdir motd; cd motd; pwd
/etc/puppetlabs/code/environments/production/modules/motd
[root@devpup]#

3. Create these main directories
[root@devpup]# ls
[root@devpup]# mkdir {files,manifests,templates,tests}
[root@devpup]# ls
files  manifests  templates  tests
[root@devpup]# cd manifests/
[root@devpup]# ls
[root@devpup]# pwd
/etc/puppetlabs/code/environments/production/modules/motd/manifests

4. Create class motd and declare resources
note: Resource such as package, file, directory
go to docs.puppet.com for detail

=> - hash rocket

a. Create a file init.pp with following contents
inside this class, we have to define resource

[root@devpup]# vi init.pp

class motd {
        file { "/etc/motd":
                ensure => 'file',
                source => "puppet:///modules/motd/motd",    # [on puppet master, the file named motd inside the motd module's files dir]
        }
}

[root@devpup]# pwd
/etc/puppetlabs/code/environments/production/modules/motd/manifests
[root@devpup]# cd ..
[root@devpup]#

b. Go onto to modules directory and go to files and create motd file

[root@devpup]# ls
files  manifests  templates  tests
[root@devpup]# cd files
[root@devpup]# vi motd
[root@devpup]# cat motd
This is a puppet implementation of motd

do not modify this file, change will be
lost once it is run from puppet master.


-TechTeam
[root@devpup]#

5. Now, you completed motd class.
Now, go back to module dir and go to test dir and create init.pp file with following contents
on init.pp file, you just include the class you created.

[root@devpup]# cd ../tests/
# vi init.pp
include motd

save and exit

6. Now, run the command puppet apply --noop    # --noop means "no operation": do not apply the change, just test what would happen
# puppet apply --noop init.pp

[root@devpup]# puppet apply --noop init.pp
Notice: Compiled catalog for devpup.expanor.local in environment production in 0.15 seconds
Notice: /Stage[main]/Motd/File[/etc/motd]/content: current_value {md5}d41d8cd98f00b204e9800998ecf8427e, should be {md5}2d6311c2aa4f799ee9cb43b2642f3b8d (noop)
Notice: Class[Motd]: Would have triggered 'refresh' from 1 events
Notice: Stage[main]: Would have triggered 'refresh' from 1 events
Notice: Applied catalog in 0.41 seconds

[root@puppetdev ~]# cat /etc/motd
[root@puppetdev ~]# cp -p /etc/motd /etc/motd.origg
[root@puppetdev ~]#

[root@devpup]# ls -l /etc/motd
-rw-r--r--. 1 root root 0 Jun  7  2013 /etc/motd
[root@devpup]# cp -p /etc/motd /etc/motd.origg

7. Once you verify the output, run the command puppet apply followed by the init.pp file.
[root@devpup]# puppet apply init.pp
Notice: Compiled catalog for devpup.expanor.local in environment production in 0.12 seconds
Notice: /Stage[main]/Motd/File[/etc/motd]/content: content changed '{md5}d41d8cd98f00b204e9800998ecf8427e' to '{md5}2d6311c2aa4f799ee9cb43b2642f3b8d'
Notice: Applied catalog in 0.41 seconds
[root@devpup]# cat /etc/motd
This is a puppet implementation of motd

do not modify this file, change will be
lost once it is run from puppet master.


-TechTeam
[root@devpup]#

you successfully tested on puppet server which is also a client. Now, you have to test on node (client).

8. Now, you have to apply this change to client.
What we have to do is the node classification.

Now, go to main manifests directory and edit site.pp file.
[root@devpup]# cd /etc/puppetlabs/code/environments/production/manifests

go all the way down and this is where you define node class.
you can define under new section with particular host [create a new one.] or under default node section which will apply to all.
In this example, we will add entry under default node.

[root@devpup]# vi site.pp
node default {
        class { 'motd': }
        notify {"Just testing here": }
}

9. Once you make change to this file, save it.
Now, go back to agent node and execute the command puppet agent -t command.

[root@puppetdev ~]# puppet agent -t
Warning: Unable to fetch my node definition, but the agent run will continue:
Warning: getaddrinfo: Temporary failure in name resolution
Info: Retrieving pluginfacts
Error: /File[/opt/puppetlabs/puppet/cache/facts.d]: Failed to generate additional resources using 'eval_generate': getaddrinfo: Temporary failure in name resolution
Error: /File[/opt/puppetlabs/puppet/cache/facts.d]: Could not evaluate: Could not retrieve file metadata for puppet:///pluginfacts: getaddrinfo: Temporary failure in name resolution
Info: Retrieving plugin
Error: /File[/opt/puppetlabs/puppet/cache/lib]: Failed to generate additional resources using 'eval_generate': getaddrinfo: Temporary failure in name resolution
Error: /File[/opt/puppetlabs/puppet/cache/lib]: Could not evaluate: Could not retrieve file metadata for puppet:///plugins: getaddrinfo: Temporary failure in name resolution
Info: Loading facts
Error: Could not retrieve catalog from remote server: getaddrinfo: Temporary failure in name resolution
Warning: Not using cache on failed catalog
Error: Could not retrieve catalog; skipping run
Error: Could not send report: getaddrinfo: Temporary failure in name resolution
[root@puppetdev ~]# ping devpup.expanor.local
PING devpup.expanor.local (192.168.10.16) 56(84) bytes of data.
64 bytes from devpup.expanor.local (192.168.10.16): icmp_seq=1 ttl=64 time=0.138 ms
64 bytes from devpup.expanor.local (192.168.10.16): icmp_seq=2 ttl=64 time=0.145 ms
^C
--- devpup.expanor.local ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.138/0.141/0.145/0.012 ms
[root@puppetdev ~]# hostname
puppetdev.expanor.local
[root@puppetdev ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; enabled; vendor preset: enabled)
   Active: active (running) since Sat 2017-09-02 12:08:46 EDT; 3 days ago
 Main PID: 706 (firewalld)
   CGroup: /system.slice/firewalld.service
           └─706 /usr/bin/python -Es /usr/sbin/firewalld --nofork --nopid

Sep 02 12:08:45 puppetdev.expanor.local systemd[1]: Starting firewalld - dynami...
Sep 02 12:08:46 puppetdev.expanor.local systemd[1]: Started firewalld - dynamic...
Hint: Some lines were ellipsized, use -l to show in full.
[root@puppetdev ~]# systemctl stop firewalld
[root@puppetdev ~]#

run the puppet agent -t command again. looks like firewall is blocking it..

[root@puppetdev ~]# puppet agent -t
Warning: Unable to fetch my node definition, but the agent run will continue:
Warning: getaddrinfo: Temporary failure in name resolution
Info: Retrieving pluginfacts
Error: /File[/opt/puppetlabs/puppet/cache/facts.d]: Failed to generate additional
Error: /File[/opt/puppetlabs/puppet/cache/facts.d]: Could not evaluate: Could not me resolution
Info: Retrieving plugin
Error: /File[/opt/puppetlabs/puppet/cache/lib]: Failed to generate additional reso
Error: /File[/opt/puppetlabs/puppet/cache/lib]: Could not evaluate: Could not retrieve file metadata for puppet:///plugins: getaddrinfo: Temporary failure in name resolution
Info: Loading facts
Error: Could not retrieve catalog from remote server: getaddrinfo: Temporary failure in name resolution
Warning: Not using cache on failed catalog
Error: Could not retrieve catalog; skipping run
Error: Could not send report: getaddrinfo: Temporary failure in name resolution
[root@puppetdev ~]# grep devpup.expanor.local /etc/hosts
#192.168.10.16   devpup.expanor.local devpup puppet
192.168.10.16   devpup.expanor.local devpup
[root@puppetdev ~]# puppet agent -t --verbose --server devpup.expanor.local
Info: Using configured environment 'production'
Info: Retrieving pluginfacts
Info: Retrieving plugin
Info: Loading facts
Info: Caching catalog for puppetdev.expanor.local
Info: Applying configuration version '1504635836'
Notice: /Stage[main]/Motd/File[/etc/motd]/content:
--- /etc/motd   2013-06-07 10:31:32.000000000 -0400
+++ /tmp/puppet-file20170905-24243-r5tj39       2017-09-05 14:21:41.362462828 -0400
@@ -0,0 +1 @@
+my motd

Notice: /Stage[main]/Motd/File[/etc/motd]/content: content changed '{md5}d41d8cd98f00b204e9800998ecf8427e' to '{md5}2d6311c2aa4f799ee9cb43b2642f3b8d'
Notice: Just testing here
Notice: /Stage[main]/Main/Node[default]/Notify[Just testing here]/message: defined 'message' as 'Just testing here'
Notice: Applied catalog in 0.68 seconds
[root@puppetdev ~]#


------------------
run following on puppet server

[root@devpup]# netstat -an | grep 8140
tcp6       0      0 :::8140                 :::*                    LISTEN
tcp6       0      0 192.168.10.16:8140      192.168.10.25:57707     ESTABLISHED
[root@devpup]# man nscd
[root@devpup]# nscd -i files
bash: nscd: command not found...



[root@devpup]# puppet resource service puppetserve ensure=running
Error: Execution of 'journalctl -n 50 --since '5 minutes ago' -u puppetserve --no-pager' ret                                                                            urned 1: -- Logs begin at Sat 2017-09-02 08:18:50 EDT, end at Tue 2017-09-05 15:45:01 EDT. -                                                                            -
Error: /Service[puppetserve]/ensure: change from stopped to running failed: Execution of 'jo                                                                            urnalctl -n 50 --since '5 minutes ago' -u puppetserve --no-pager' returned 1: -- Logs begin                                                                             at Sat 2017-09-02 08:18:50 EDT, end at Tue 2017-09-05 15:45:01 EDT. --
service { 'puppetserve':
  ensure => 'stopped',
}
[root@devpup]# puppet resource service puppetserver enable=true
Error: Could not unmask puppetserver:
Error: /Service[puppetserver]/enable: change from false to true failed: Could not unmask pup                                                                            petserver:
service { 'puppetserver':
  enable => 'false',
}
[root@devpup]#

--------------------


Go to the client node and run it again,

[root@puppetdev Packages]# puppet agent -t
Warning: Unable to fetch my node definition, but the agent run will continue:
Warning: getaddrinfo: Temporary failure in name resolution
Info: Retrieving pluginfacts
Error: /File[/opt/puppetlabs/puppet/cache/facts.d]: Failed to generate additional resources using 'eval_generate': getaddrinfo: Temporary failure in name resolution
Error: /File[/opt/puppetlabs/puppet/cache/facts.d]: Could not evaluate: Could not retrieve file metadata for puppet:///pluginfacts: getaddrinfo: Temporary failure in name resolution
Info: Retrieving plugin
Error: /File[/opt/puppetlabs/puppet/cache/lib]: Failed to generate additional resources using 'eval_generate': getaddrinfo: Temporary failure in name resolution
Error: /File[/opt/puppetlabs/puppet/cache/lib]: Could not evaluate: Could not retrieve file metadata for puppet:///plugins: getaddrinfo: Temporary failure in name resolution
Info: Loading facts
Error: Could not retrieve catalog from remote server: getaddrinfo: Temporary failure in name resolution
Warning: Not using cache on failed catalog
Error: Could not retrieve catalog; skipping run
Error: Could not send report: getaddrinfo: Temporary failure in name resolution

it was not working but after a while, I think the default push time is 30 minutes, the motd file was updated.

[root@puppetdev Packages]# cat /etc/motd
This is a puppet implementation of motd

do not modify this file, change will be
lost once it is run from puppet master.


-TechTeam
[root@puppetdev Packages]#



define node

[root@devpup]# pwd
/etc/puppetlabs/code/environments/production/manifests
[root@devpup]# vi site.pp

node "puppetdev.expanor.local" {
        notify { 'This is a test notify': }
}


[root@devpup]# pwd
/etc/puppetlabs/code/environments/production
[root@devpup]# ls
environment.conf  hieradata  manifests  modules
[root@devpup]# ls -lR /etc/puppetlabs/code/environments/production
/etc/puppetlabs/code/environments/production:
total 4
-rw-r--r--. 1 pe-puppet pe-puppet 879 Oct 11  2016 environment.conf
drwxr-xr-x. 2 pe-puppet pe-puppet   6 Oct 11  2016 hieradata
drwxr-xr-x. 2 pe-puppet pe-puppet  40 Sep  5 17:38 manifests
drwxr-xr-x. 3 pe-puppet pe-puppet  17 Sep  5 13:44 modules

/etc/puppetlabs/code/environments/production/hieradata:
total 0

/etc/puppetlabs/code/environments/production/manifests:
total 8
-rw-r-----. 1 pe-puppet pe-puppet 1538 Sep  5 17:38 site.pp
-rw-r-----. 1 root      root      1309 Sep  2 13:27 site.pp.origg

/etc/puppetlabs/code/environments/production/modules:
total 0
drwxr-xr-x. 6 root root 62 Sep  5 13:46 motd

/etc/puppetlabs/code/environments/production/modules/motd:
total 0
drwxr-xr-x. 2 root root 17 Sep  5 17:39 files
drwxr-xr-x. 2 root root 20 Sep  5 13:56 manifests
drwxr-xr-x. 2 root root  6 Sep  5 13:46 templates
drwxr-xr-x. 2 root root 20 Sep  5 14:01 tests

/etc/puppetlabs/code/environments/production/modules/motd/files:
total 4
-rw-r--r--. 1 root root 144 Sep  5 17:39 motd

/etc/puppetlabs/code/environments/production/modules/motd/manifests:
total 4
-rw-r--r--. 1 root root 103 Sep  5 13:56 init.pp

/etc/puppetlabs/code/environments/production/modules/motd/templates:
total 0

/etc/puppetlabs/code/environments/production/modules/motd/tests:
total 4
-rw-r--r--. 1 root root 13 Sep  5 14:01 init.pp
[root@devpup]#


Wednesday, June 28, 2017

Browser - Chrome restore session before the crash


While I was searching and reading some links content, my chrome browser crashed. When I restarted, I didn't see the restore last sessions like on firefox. Here is a way to fix it.


perform following tasks to restore your session on your browser

1. Click the Chrome Menu in the upper right corner with three dots lined up.
2. Click on settings
3. Click on main menu on left top corner
4. In the drop-down menu, click on "On startup"
5. select a radio button, "continue where you left off"

To test,
1. Open couple of tab and open some web pages
2. kill the chrome process or close the browser
3. Open your browser and your session should be restored.

Tuesday, June 27, 2017

Solaris 10 - zone creation


1. Login to control Domain,

login as: root
Using keyboard-interactive authentication.
Password:
Last login: Sun Jun 25 08:36:43 2017
Oracle Corporation      SunOS 5.10      Generic Patch   January 2005
# bash
bash-3.2# df -h
Filesystem             size   used  avail capacity  Mounted on
rpool/ROOT/s10x_u11wos_24a    12G   4.4G   5.3G    46%    /

bash-3.2# echo | format
Searching for disks...
Inquiry failed for this logical diskdone


AVAILABLE DISK SELECTIONS:
       0. c0d0 <▒x▒▒▒▒▒▒▒▒▒@▒▒▒ cyl 1565 alt 2 hd 255 sec 63>
          /pci@0,0/pci-ide@7,1/ide@0/cmdk@0,0
Specify disk (enter its number): Specify disk (enter its number):
bash-3.2# zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
rpool  11.9G  6.39G  5.55G  53%  ONLINE  -
bash-3.2# zfs list
NAME                         USED  AVAIL  REFER  MOUNTPOINT
rpool                       6.45G  5.30G  42.5K  /rpool
rpool/ROOT                  4.39G  5.30G    31K  legacy
rpool/ROOT/s10x_u11wos_24a  4.39G  5.30G  4.39G  /
rpool/dump                  1.00G  5.30G  1.00G  -
rpool/export                  76K  5.30G    32K  /export
rpool/export/home             44K  5.30G    44K  /export/home
rpool/swap                  1.06G  5.36G  1.00G  -
bash-3.2# ping 192.168.10.135
192.168.10.135 is alive
bash-3.2# ssh 192.168.10.135
^C
bash-3.2# pwd
/
bash-3.2# mkdir /export/home/bishal-zone
bash-3.2# zonecfg -z bishal
bishal: No such zone configured
Use 'create' to begin configuring a new zone.
zonecfg:bishal> create
zonecfg:bishal> set zonepath=/export/home/bishal-zone
zonecfg:bishal> set autoboot=true
zonecfg:bishal> add net
zonecfg:bishal:net> set physical=e1000g0
zonecfg:bishal:net> set address=192.168.10.221
zonecfg:bishal:net> end
zonecfg:bishal> add attr
zonecfg:bishal:attr> set name=comment
zonecfg:bishal:attr> set type=string
zonecfg:bishal:attr> set value="Bishals zone"
zonecfg:bishal:attr> end
zonecfg:bishal> verify
zonecfg:bishal> commit
zonecfg:bishal> info
zonename: bishal
zonepath: /export/home/bishal-zone
brand: native
autoboot: true
bootargs:
pool:
limitpriv:
scheduling-class:
ip-type: shared
hostid:
inherit-pkg-dir:
        dir: /lib
inherit-pkg-dir:
        dir: /platform
inherit-pkg-dir:
        dir: /sbin
inherit-pkg-dir:
        dir: /usr
net:
        address: 192.168.10.221
        physical: e1000g0
        defrouter not specified
attr:
        name: comment
        type: string
        value: "Bishals zone"
zonecfg:bishal> exit
bash-3.2# zonecfg -z bishal export | more
create -b
set zonepath=/export/home/bishal-zone
set autoboot=true
set ip-type=shared
add inherit-pkg-dir
set dir=/lib
end
add inherit-pkg-dir
set dir=/platform
end
add inherit-pkg-dir
set dir=/sbin
end
add inherit-pkg-dir
set dir=/usr
end
add net
set address=192.168.10.221
set physical=e1000g0
end
add attr
set name=comment
set type=string
set value="Bishals zone"
end
bash-3.2#  zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              native   shared
   - bishal           configured /export/home/bishal-zone       native   shared
bash-3.2# zoneadm -z bishal install
/export/home/bishal-zone must not be group readable.
/export/home/bishal-zone must not be group executable.
/export/home/bishal-zone must not be world readable.
/export/home/bishal-zone must not be world executable.
could not verify zonepath /export/home/bishal-zone because of the above errors.
zoneadm: zone bishal failed to verify
bash-3.2# ls -ld /export/home/bishal-zone
drwxr-xr-x   2 root     root           2 Jun 25 11:56 /export/home/bishal-zone
bash-3.2# chmod 700 /export/home/bishal-zone
bash-3.2# zoneadm -z bishal install
A ZFS file system has been created for this zone.
Preparing to install zone <bishal>.
Creating list of files to copy from the global zone.
Copying <2711> files to the zone.
Initializing zone product registry.
Determining zone package initialization order.
Preparing to initialize <1244> packages on the zone.
Initialized <1244> packages on zone.
Zone <bishal> is initialized.
The file </export/home/bishal-zone/root/var/sadm/system/logs/install_log> contains a log of the zone installation.
bash-3.2# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              native   shared
   - bishal           installed  /export/home/bishal-zone       native   shared
bash-3.2# cd /export/home/bishal-zone/
bash-3.2# ls
root
bash-3.2# cd root
bash-3.2# ls
bin       etc       home      lib       opt       proc      system    usr
dev       export    kernel    mnt       platform  sbin      tmp       var
bash-3.2# df -h .
Filesystem             size   used  avail capacity  Mounted on
rpool/export/home/bishal-zone
                        12G    76M   5.2G     2%    /export/home/bishal-zone
bash-3.2# ifconfig -a
lo0: flags=2001000849<UP,LOOPBACK,RUNNING,MULTICAST,IPv4,VIRTUAL> mtu 8232 index 1
        inet 127.0.0.1 netmask ff000000
e1000g0: flags=1004843<UP,BROADCAST,RUNNING,MULTICAST,DHCP,IPv4> mtu 1500 index 2
        inet 192.168.10.20 netmask ffffff00 broadcast 192.168.10.255
        ether 0:c:29:e:4a:65
bash-3.2# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              native   shared
   - bishal           installed  /export/home/bishal-zone       native   shared
bash-3.2# zoneadm -z bishal boot
zoneadm: zone 'bishal': WARNING: e1000g0:1: no matching subnet found in netmasks(4) for 192.168.10.221; using default of 255.255.255.0.
bash-3.2# ifconfig -a
lo0: flags=2001000849<UP,LOOPBACK,RUNNING,MULTICAST,IPv4,VIRTUAL> mtu 8232 index 1
        inet 127.0.0.1 netmask ff000000
lo0:1: flags=2001000849<UP,LOOPBACK,RUNNING,MULTICAST,IPv4,VIRTUAL> mtu 8232 index 1
        zone bishal
        inet 127.0.0.1 netmask ff000000
e1000g0: flags=1004843<UP,BROADCAST,RUNNING,MULTICAST,DHCP,IPv4> mtu 1500 index 2
        inet 192.168.10.20 netmask ffffff00 broadcast 192.168.10.255
        ether 0:c:29:e:4a:65
e1000g0:1: flags=1000843<UP,BROADCAST,RUNNING,MULTICAST,IPv4> mtu 1500 index 2
        zone bishal
        inet 192.168.10.221 netmask ffffff00 broadcast 192.168.10.255
bash-3.2# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              native   shared
   1 bishal           running    /export/home/bishal-zone       native   shared
bash-3.2# zoneadm -z bishal reboot
zoneadm: zone 'bishal': WARNING: e1000g0:1: no matching subnet found in netmasks(4) for 192.168.10.221; using default of 255.255.255.0.
bash-3.2# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              native   shared
   2 bishal           running    /export/home/bishal-zone       native   shared
bash-3.2# zlogin -C bishal
[Connected to zone 'bishal' console]


Select a Language

  0. English
  1. es
  2. fr

Please make a choice (0 - 2), or press h or ? for help: 0


Select a Locale

  0. English (C - 7-bit ASCII)
  1. Canada (English) (UTF-8)
  2. Canada-English (ISO8859-1)
  3. U.S.A. (UTF-8)
  4. U.S.A. (en_US.ISO8859-1)
  5. U.S.A. (en_US.ISO8859-15)
  6. Go Back to Previous Screen

Please make a choice (0 - 6), or press h or ? for help: 0


What type of terminal are you using?
 1) ANSI Standard CRT
 2) DEC VT52
 3) DEC VT100
 4) Heathkit 19
 5) Lear Siegler ADM31
 6) PC Console
 7) Sun Command Tool
 8) Sun Workstation
 9) Televideo 910
 10) Televideo 925
 11) Wyse Model 50
 12) X Terminal Emulator (xterms)
 13) CDE Terminal Emulator (dtterm)
 14) Other
Type the number of your choice and press Return: 12
Creating new rsa public/private host key pair
Creating new dsa public/private host key pair
Configuring network interface addresses: e1000g0.
q Host Name for e1000g0:1 qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  Enter the host name which identifies this system on the network.  The name
  must be unique within your domain; creating a duplicate host name will cause
  problems on the network after you install Solaris.

  A host name must have at least one character; it can contain letters,
  digits, and minus signs (-).


             Host name for e1000g0:1 bishal
                                     bishal



qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
    F2_Continue    F6_Help
q Confirm Information for e1000g0:1 qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  > Confirm the following information.  If it is correct, press F2;
    to change any information, press F4.


                          Host name: bishal




qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
  Just a moment... F4_Change    F6_Help
q Configure Security Policy: qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  Specify Yes if the system will use the Kerberos security mechanism.

  Specify No if this system will use standard UNIX security.

      Configure Kerberos Security
      qqqqqqqqqqqqqqqqqqqqqqqqqqq
      [ ] Yes
      [X] No





qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
    F2_Continue    F6_Help
q Confirm Information qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  > Confirm the following information.  If it is correct, press F2;
    to change any information, press F4.


        Configure Kerberos Security: No




qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
  Please wait...   F4_Change    F6_Help
q Name Service qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  On this screen you must provide name service information.  Select the name
  service that will be used by this system, or None if your system will either
  not use a name service at all, or if it will use a name service not listed
  here.

  > To make a selection, use the arrow keys to highlight the option
    and press Return to mark it [X].


      Name service
      qqqqqqqqqqqq
      [X] NIS+
      [ ] NIS
      [ ] DNS
      [ ] LDAP
      [ ] None
        ]
       X]


qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
    F2_Continue    F6_Help
q Confirm Information qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  > Confirm the following information.  If it is correct, press F2;
    to change any information, press F4.


                       Name service: None




qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
  Just a moment... F4_Change    F6_Help
q NFSv4 Domain Name qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  NFS version 4 uses a domain name that is automatically derived from the
  system's naming services. The derived domain name is sufficient for most
  configurations. In a few cases, mounts that cross domain boundaries might
  cause files to appear to be owned by "nobody" due to the lack of a common
  domain name.

  The current NFSv4 default domain is: ""


      NFSv4 Domain Configuration
      qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
      [X] Use the NFSv4 domain derived by the system
      [ ] Specify a different NFSv4 domain



qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
    F2_Continue    F6_Help
q Confirm Information for NFSv4 Domain qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  > Confirm the following information.  If it is correct, press F2;
    to change any information, press F4.


                 NFSv4 Domain Name:  << Value to be derived dynamically >>



qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
    F2_Continue    F4_Change    F6_Help
q Time Zone qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  On this screen you must specify your default time zone.  You can specify a
  time zone in three ways:  select one of the continents or oceans from the
  list, select other - offset from GMT, or other - specify time zone file.

  > To make a selection, use the arrow keys to highlight the option and
    press Return to mark it [X].


      Continents and Oceans
      qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
  -   [ ] Africa
  x   [ ] Americas
  x   [ ] Antarctica
  x   [X] Arctic Ocean
  x   [ ] Asia
  x   [ ] Atlantic Ocean
  x   [ ] Australia
  x   [ ] Europe
  v   [ ] Indian Ocean

qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
    F2_Continue    F6_Help
q Country or Region qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  > To make a selection, use the arrow keys to highlight the option and
    press Return to mark it [X].


      Countries and Regions
      qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
  -   [ ] United States
  x   [ ] Anguilla
  x   [X] Antigua & Barbuda
  x   [ ] Argentina
  x   [ ] Aruba
  x   [ ] Bahamas
  x   [ ] Barbados
  x   [ ] Belize
  x   [ ] Bolivia
  x   [ ] Bonaire Sint Eustatius & Saba
  x   [ ] Brazil
  x   [ ] Canada
  v   [ ] Cayman Islands

qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
    F2_Continue    F6_Help
q Time Zone qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  > To make a selection, use the arrow keys to highlight the option and
    press Return to mark it [X].


      Time zones
      qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
  -   [ ] Eastern Time
  x   [ ] Eastern Time - Michigan - most locations
  x   [X] Eastern Time - Kentucky - Louisville area
  x   [ ] Eastern Time - Kentucky - Wayne County
  x   [ ] Eastern Time - Indiana - most locations
  x   [ ] Eastern Time - Indiana - Daviess, Dubois, Knox & Martin Counties
  x   [ ] Eastern Time - Indiana - Pulaski County
  x   [ ] Eastern Time - Indiana - Crawford County
  x   [ ] Eastern Time - Indiana - Pike County
  x   [ ] Eastern Time - Indiana - Switzerland County
  x   [ ] Central Time
  x   [ ] Central Time - Indiana - Perry County
  v   [ ] Central Time - Indiana - Starke County

qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
    F2_Continue    F6_Help
q Confirm Information qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  > Confirm the following information.  If it is correct, press F2;
    to change any information, press F4.


                          Time zone: Eastern Time
                                     (US/Eastern)




qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
  Please wait...   F4_Change    F6_Help
q Root Password qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq

  Please enter the root password for this system.

  The root password may contain alphanumeric and special characters.  For
  security, the password will not be displayed on the screen as you type it.

  > If you do not want a root password, leave both entries blank.


                     Root password:
                     Root password:  ********
                                     ********




qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
    F2_Continue    F6_Help




rebooting system due to change(s) in /etc/default/init


[NOTICE: Zone rebooting]


SunOS Release 5.10 Version Generic_147148-26 64-bit
Copyright (c) 1983, 2013, Oracle and/or its affiliates. All rights reserved.
Hostname: bishal

bishal console login: Jun 25 12:15:37 bishal sendmail[7176]: My unqualified host name (bishal) unknown; sleeping for retry
Jun 25 12:15:37 bishal sendmail[7182]: My unqualified host name (bishal) unknown; sleeping for retry
root
Password:
Jun 25 12:15:55 bishal login: ROOT LOGIN /dev/console
Oracle Corporation      SunOS 5.10      Generic Patch   January 2005
# bash
bash-3.2# useradd -d /export/home/bishal -m -c "Bishal" -s /bin/bash bishal
UX: useradd: ERROR: Unable to create the home directory: No such file or directory.
bash-3.2# ls -ld /export/hom
bash-3.2# mkdir Jun 25 12:16:37 bishal sendmail[7176]: unable to qualify my own domain name (bishal) -- using short name
Jun 25 12:16:37 bishal sendmail[7176]: [ID 702911 mail.alert] unable to qualify my own domain name (bishal) -- using short name
Jun 25 12:16:37 bishal sendmail[7182]: unable to qualify my own domain name (bishal) -- using short name
Jun 25 12:16:37 bishal sendmail[7182]: [ID 702911 mail.alert] unable to qualify my own domain name (bishal) -- using short name
/export/home
bash-3.2# useradd -d /export/home/bishal -m -c "Bishal" -s /bin/bash bishal
bash-3.2# passwd bishal
New Password:
passwd: The password must contain at least 1 numeric or special character(s).

Please try again
New Password:

bash-3.2# grep root /etc/passwd
root:x:0:0:Super-User:/:/sbin/sh
bash-3.2# grep root /etc/shadow
root:l5u2cH9PhmZI6:6445::::::
bash-3.2# vi /etc/ssh/sshd_config
"/etc/ssh/sshd_config" 155 lines, 4997 characters
#
# Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
#
# ident "@(#)sshd_config        1.10    10/10/19 SMI"
#
# Configuration file for sshd(1m)

# Protocol versions supported
#
# The sshd shipped in this release of Solaris has support for major versions
# 1 and 2.  It is recommended due to security weaknesses in the v1 protocol
# that sites run only v2 if possible. Support for v1 is provided to help sites
# with existing ssh v1 clients/servers to transition.
# Support for v1 may not be available in a future release of Solaris.
#
# To enable support for v1 an RSA1 key must be created with ssh-keygen(1).
# RSA and DSA keys for protocol v2 are created by /etc/init.d/sshd if they
# do not already exist, RSA1 keys for protocol v1 are not automatically created.

# Uncomment ONLY ONE of the following Protocol statements.

# Only v2 (recommended)
/PermitR
# Depending on the setup of pam.conf(4) this may allow tunneled clear text
# passwords even when PasswordAuthentication is set to no. This is dependent
# on what the individual modules request and is out of the control of sshd
# or the protocol.
PAMAuthenticationViaKBDInt yes

# Are root logins permitted using sshd.
# Note that sshd uses pam_authenticate(3PAM) so the root (or any other) user
# maybe denied access by a PAM module regardless of this setting.
# Valid options are yes, without-password, no.
#PermitRootLogin no
PermitRootLogin yes

# sftp subsystem
Subsystem       sftp    internal-sftp


# SSH protocol v1 specific options
#
# The following options only apply to the v1 protocol and provide
# some form of backwards compatibility with the very weak security
"/etc/ssh/sshd_config" 156 lines, 5018 characters nd the functionality
bash-3.2# svcs -a | grep ssh
online         12:15:37 svc:/network/ssh:default
bash-3.2# svcadm restart ssh
bash-3.2# Jun 25 12:18:59 bishal su: 'su root' succeeded for bishal on /dev/pts/8

bash-3.2#
bash-3.2# hostname
bishal
bash-3.2# ifconfig -a
lo0:1: flags=2001000849<UP,LOOPBACK,RUNNING,MULTICAST,IPv4,VIRTUAL> mtu 8232 index 1
        inet 127.0.0.1 netmask ff000000
e1000g0:1: flags=1000843<UP,BROADCAST,RUNNING,MULTICAST,IPv4> mtu 1500 index 2
        inet 192.168.10.221 netmask ffffff00 broadcast 192.168.10.255
bash-3.2# exit
# ^D

bishal console login: Jun 25 12:30:17 bishal su: 'su root' succeeded for bishal on /dev/pts/9

Solaris 10 - Fixing missing home directory issue

If your environment uses autofs, this solution does not work; check the autofs-related configuration files and your NFS share, and try to mount the home directory manually. In an isolated or lab environment, if you forget to use the -m option with useradd, the home directory will not be created. To fix the issue, follow the steps below.

Missing home directory issue

When you login, you will see following error,

Could not chdir to home directory /export/home/bishal: No such file or directory

# mkdir /export/home/bishal
# id -a bishal
uid=100(bishal) gid=1(other) groups=1(other)
# cp -r /etc/skel/* /export/home/bishal/
# chown -R bishal:1 /export/home/bishal
# chmod 700 /export/home/bishal
#

Login as a root user directly on Solaris 10

# vi /etc/ssh/sshd_config
PermitRootLogin yes

# svcs -a | grep ssh
online          8:19:33 svc:/network/ssh:default
# svcadm restart  svc:/network/ssh:default
#

Relogin to your server

login as: root
Password:
#

Solaris10 - Installing solaris 10 on Sun T-series server

Login to console on SUN t5120

1. Login to console
$ ssh <IP Addr>

2. Power on the system
-> start /SYS

3. Start the console
-> start /SP/console

{0} ok

4. To exit off the console type #.

5. To login to console
-> start /SP/console

6. Once you are on OK prompt, insert the OS CD and boot the system to Install the operating system
{0} ok boot cdrom


Select following options at the prompt
7. Language selection:
Please make a choice (0 - 10), or press h or ? for help: 0
0 for english

8. What type of terminal are you using?  [ we will be using 6) PC Console ]

Type the number of your choice and press Return: 6


9. Press f2 to continue [ Esc-2_Continue ]

10. On Identify This System  windows, press Esc-2_continue

11. on ' Network Connectivity' select the box next to Networked [x]

12. on  Configure Multiple Network Interfaces, select the first instance of the interface since we plug the network cable on that port
Note: to select, go to the particular port and press the space bar to select it.

13. on DHCP for e1000g0 screen, Use DHCP for e1000g0, select No option

specify the ip address on next screen.

14. on  Subnet for e1000g0 screen, select yes and continue

15. on Netmask for e1000g0  screen, specify your subnet: 255.255.255.0

on IPv6, just select no and continue

16.  on Set the Default Route for e1000g0 screen, just select [X] Detect one upon reboot or you can specify one. for our case 192.168.10.1

17, on ' Confirm Information for e1000g0 ' screen, verify the information and make sure they are correct

18. Esc+2 to continue

19. on ' Configure Security Policy' window, just select No and continue

20. on  Confirm Information screen, verify the info and continue

21. on ' Name Service' screen, select the one, based on your environment. In our case, we are not using any name service so we will select None. and continue

22. Confirm the info and continue.

23. on  NFSv4 Domain Name - select Use the NFSv4 domain derived by the system and continue

24. On ' Time Zone ' windows, specify the time zone.
    - in our case, select second box, press Esc+2 to continue
    - select the first option: United States and continue
    - Select first option Eastern time and continue
    - next screen on 'Date and Time' windows, just press Esc+2 to continue
    - Next page on 'Confirm Information' page, you verify the info. if you find wrong info, press Esc+4 to change.

25. On ' Root Password' window, specify your root password. and continue by pressing Esc +2

26. on 'Enabling remote services' windows, select no, and continue

27. on 'Provide Oracle Configuration Manager Registration Information' windows, press Esc+2 to continue.

28. Press Esc+2 to continue again.

until now, System identification is completed.

The installation process begins now.

29. On the 'Solaris Interactive Installation' window, press F2_Standard for a standard installation, or F4_Flash for a flash archive installation (from a Gold Image). We will select the standard installation.

30. on 'iSCSI Installation' window, select  Install on non-iSCSI target


eth0: config: auto-negotiation on, 100FDX, 100HDX, 10FDX, 10HDX.
Listening on LPF/eth0/00:21:28:24:41:ed
Sending on   LPF/eth0/00:21:28:24:41:ed
Sending on   Socket/fallback
DHCPDISCOVER on eth0 to 255.255.255.255 port 67 interval 4
eth0: status: link up.
eth0: status: link up, 100 Mbps Full Duplex, auto-negotiation complete.
DHCPDISCOVER on eth0 to 255.255.255.255 port 67 interval 6
DHCPOFFER from 192.168.10.1
DHCPREQUEST on eth0 to 255.255.255.255 port 67
DHCPACK from 192.168.10.1
bound to 192.168.10.36 -- renewal in 43004 seconds.
done.


Solaris 10 software installation succeeded

Customizing system files
        - Mount points table (/etc/vfstab)
        - Network host addresses (/etc/hosts)
        - Environment variables (/etc/default/init)

Cleaning devices

Customizing system devices
        - Physical devices (/devices)
        - Logical devices (/dev)

Installing boot information
        - Installing boot blocks (c1t0d0s0)
        - Installing boot blocks (/dev/rdsk/c1t0d0s0)
        - Updating system firmware for automatic rebooting
        - Given Disk (/dev/dsk/c1t0d0s0) is not a iSCSI Disk



Installation log location
        - /a/var/sadm/system/logs/install_log (before reboot)
        - /var/sadm/system/logs/install_log (after reboot)

Installation complete
Executing SolStart postinstall phase...
Executing finish script "patch_finish"...


Finish script patch_finish execution completed.
Executing JumpStart postinstall phase...

The begin script log 'begin.log'
is located in /var/sadm/system/logs after reboot.

The finish script log 'finish.log'
is located in /var/sadm/system/logs after reboot.


Launching installer. Please Wait...



Installing Additional Software
|-1%--------------25%-----------------50%-----------------75%--------------100%|

   Pausing for 30 seconds at the "Summary" screen. The wizard will continue to
   the next step unless you select "Pause". Enter 'p' to pause. Enter 'c' to
   continue. [c]


Installing Additional Software
|-1%--------------25%-----------------50%-----------------75%--------------100%|

   Pausing for 30 seconds at the "Summary" screen. The wizard will continue to
   the next step unless you select "Pause". Enter 'p' to pause. Enter 'c' to
   continue. [c] c


   Pausing for 90 seconds at the "Reboot" screen. The wizard will continue to
   the next step unless you select "Pause". Enter 'p' to pause. Enter 'c' to
   continue. [c]

Creating boot_archive for /a
updating /a/platform/sun4v/boot_archive


syncing file systems... done
rebooting...
Resetting...
ChassisSerialNumber BEL0902U3E


SPARC Enterprise T5120, No Keyboard
Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
OpenBoot 4.33.6.f, 32640 MB memory available, Serial #86262244.
Ethernet address 0:21:28:24:41:e4, Host ID: 852441e4.

Friday, June 23, 2017

Linux - Increase A VMware Disk Size (VMDK) Formatted As Linux LVM without rebooting

Increase A VMware Disk Size (VMDK) Formatted As Linux LVM without rebooting

To increase the size of your VMware Virtual Machine, you need
1. Increase the disk's size in your vSphere Client or through the CLI. to increase the "hardware" disk
2. Then, use  partition option to extend it.


Increase from 3GB to 10 Gb

1) Checking if you can extend the current disk or need to add a new one

Note: If disk is already partitioned in 4 primary partitions, you can not extend it. Check with fdisk -l command


# fdisk -l

/dev/sda1   *           1          25      200781   83  Linux
/dev/sda2              26        2636    20972857+  8e  Linux LVM

We see two partitions, so you can extend the current disk in the virtual machine.
If you already have 4 primary partitions, you have to add a new virtual disk to your system.


2) The "hardware" part, "physically" adding diskspace to your VM

- login to your VMware, go to edit setting of VM
- Highlight the disk you want to expand and change the size.
- If the size section is greyed out, shutdown your VM.
Note: if you have a snapshot taken of that VM, remove it first.
- Reboot your system; if you want to complete this without a reboot, you have to rescan the SCSI devices.
# ls -l /sys/class/scsi_device


you should have a disk with update disk geometry.



Source - online
















Solaris 11 - Migration of Solaris 10 LDOM oracle database server to Solaris11

Solaris 11 - Migration of Solaris 10 LDOM oracle database server to Solaris11


Prerequisites tasks

For Oracle database on Solaris 11 gold server, install following packages.

# pkg publisher
# pkginfo -l SUNWcsl
# pkg info -r motif
# pkg list | grep -i motif
# pkg install library/motif
# pkg list | grep -i xwplt
# pkg info -r xwplt
# pkg info -r SUNWxwplt
# pkg install compatibility/packages/SUNWxwplt
# pkg install compatibility/ucb


Build LDOM

1. Create domain
# ldm add-domain my_ldm-v01
# ldm list

2. Add CPU/Memory/Network info
a. Add CPU/Mem
# ldm add-vcpu 2 my_ldm-v01
# ldm add-memory 2g my_ldm-v01
# ldm list | grep sol11
b. Add network info. since its IPMP, you need to add two NIC,
Get source LDOM network info, and match it to destination
# ldm list-bindings my_ldm-v01
    vnet0            primary-vsw2@primary 
    vnet1            primary-vsw3@primary 

# ldm add-vnet linkprop=phys-state vnet0 primary-vsw0 <ldom>
# ldm add-vnet linkprop=phys-state vnet0 primary-vsw0 my_ldm-v01
# ldm add-vnet linkprop=phys-state vnet1 primary-vsw1 my_ldm-v01
# ldm list-bindings my_ldm-v01
# ldm list | more

3. Assign Disk space

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Note from storage team:

From Solaris 11 Goldsrv  SOL-MG-P01 (VM: sol-11-goldsrv    LUN 35/ID:  31D)    present to     
SOL-MG-P02 (VM:  my_ldm_v02-sol_11) LUN 48/ID: 035e



1.  From Compellant Control Center, under SOL-MG-P02, create the following VM Folders:
       my_ldm_v03-sol_11
       my_ldm_v02-sol_11

2.  Create the following LUN COPY (not Replay):
      From Solaris 11 Goldsrv  SOL-MG-P01 (VM: sol-11-goldsrv    LUN 35/ID:  31D)    present to      SOL-MG-P02 (VM:  my_ldm_v03-sol_11)  LUN

47/ID:035d
      From Solaris 11 Goldsrv  SOL-MG-P01 (VM: sol-11-goldsrv    LUN 35/ID:  31D)    present to      SOL-MG-P02 (VM:  my_ldm_v02-sol_11) LUN

48/ID: 035e

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


a. Scan the LUNS
# cfgadm -al
# cfgadm -al -o show_FCP_dev
b. Find LUN Number
# luxadm display /dev/rdsk/c3t6000D310006D640000000000000001FFd0s2
Look under    
Device Address              5000d310006d6431,1c
Here, for example, 1c is a hex number; you have to convert it to decimal (use a programmer's calculator).
1c in hex is 28 in decimal.

# echo | format | egrep "0356|0357"
# ldm add-vdsdev /dev/dsk/c0t6000D310006D64000000000000000356d0s2 DL42@primary-vds0
# ldm add-vdsdev /dev/dsk/c0t6000D310006D64000000000000000357d0s2 DL43@primary-vds0
# ldm add-vdisk VDL42 DL42@primary-vds0 my_ldm_v04_sol11
# ldm add-vdisk VDL43 DL43@primary-vds0 my_ldm-v01
# ldm list-bindings my_ldm_v04_sol11
# ldm list-bindings my_ldm-v01
# ldm list-bindings my_ldm_v04_sol11
# ldm list

4. Set auto boot to false so that system does not boot upon power on.
# ldm set-variable auto-boot\?=false my_ldm-v01
# ldm set-variable auto-boot\?=false my_ldm_v04_sol11

5. Bind and start LDOM
# ldm list
# ldm bind my_ldm_v04_sol11
# ldm start my_ldm_v04_sol11
LDom my_ldm_v04_sol11 started
# ldm start my_ldm-v01


6. List detail about Solaris 11 LDOM
# ldm list-bindings my_ldm-v01 | more

7. List detail about SOlaris 10 server LDOM
# ldm list-bindings my_ldm_v05 | more

8. Connect to the console
# telnet 0 5013
{0} ok boot -s

Enter user name for system maintenance (control-d to bypass):

root@sol-11-goldsrv:~# ifconfig -a
ipmp0: flags=10c001000842<BROADCAST,RUNNING,MULTICAST,IPv4,DUPLICATE,IPMP,PHYSRUNNING> mtu 1500 index 2
        inet 192.168.10.66 netmask ffffff80 broadcast 192.168.10.127
        groupname ipmp0
net0: flags=100001000843<UP,BROADCAST,RUNNING,MULTICAST,IPv4,PHYSRUNNING> mtu 1500 index 3
net1: flags=100061000843<UP,BROADCAST,RUNNING,MULTICAST,IPv4,STANDBY,INACTIVE,PHYSRUNNING> mtu 1500 index 4


9. change the following info based on your new env for initial set up of ip/hostname and run it
root@sol-11-goldsrv:/var/tmp/initial-setup# vi setup.sh

ENCLAVE=<dev>
HOSTNAME=<sol-11-goldsrv>
DOMAIN=<expanor.dev>
IP_ADDRESS=<192.168.10.69>

root@sol-11-goldsrv:/var/tmp/initial-setup# export EDITOR=vi
root@sol-11-goldsrv:/var/tmp/initial-setup# export TERM=vt100
root@sol-11-goldsrv:/var/tmp/initial-setup# sh setup.sh


root@sol-11-goldsrv:/var/tmp/initial-setup# svcs -a | grep network/physical
online          9:53:04 svc:/network/physical:upgrade
online          9:54:13 svc:/network/physical:default


root@sol-11-goldsrv:/var/tmp/CAC# ifconfig -a

ipmp0: flags=108001000843<UP,BROADCAST,RUNNING,MULTICAST,IPv4,IPMP,PHYSRUNNING> mtu 1500 index 2
        inet 192.168.10.69 netmask ffffff80 broadcast 192.168.10.127
        groupname ipmp0
net0: flags=100001000843<UP,BROADCAST,RUNNING,MULTICAST,IPv4,PHYSRUNNING> mtu 1500 index 3
net1: flags=100061000843<UP,BROADCAST,RUNNING,MULTICAST,IPv4,STANDBY,INACTIVE,PHYSRUNNING> mtu 1500 index 4

Verify hostname/ipaddress of the output.
root@sol-11-goldsrv:/var/tmp/CAC/DEV# svcprop identity:node | more


10. Change role into user.

root@sol-11-goldsrv:/var/tmp/CAC/DEV# usermod -R "" jay
root@sol-11-goldsrv:/var/tmp/CAC/DEV# usermod -R "" sam
root@sol-11-goldsrv:/var/tmp/CAC/DEV# rolemod -K type=normal root


11. Now, reboot the machine and login to server using new IP and set up LDAP-CAC
my_ldm_v04 console login:
root@sol-11-goldsrv:/var/tmp/CAC/DEV# reboot
root@sol-11-goldsrv:/# cd /var/tmp/CACDEV/
root@sol-11-goldsrv:/var/tmp/CAC/DEV# sh ldap_inst_dev.sh



12. Go to source server, review cat /etc/ passwd, shadow and group and copy all the user entry related to database/application account.

# egrep "gridmon|iascan|oinstall|oracle" /etc/passwd
# egrep "gridmon|iascan|oinstall|oracle" /etc/shadow
# for i in gridmon iascan oinstall oracle;  do id -a $i; done
# egrep "102|501|503" /etc/group


13. Copy the content of /export/home directory and also /var/opt/oracle
a. Enable direct root login to destination host
# vi /usr/local/ssh6.6p1/etc/sshd_config
PermitRootLogin yes

b. And restart the service
# /etc/init.d/ssh stop/start



14. Copy important files/directories (/home dir /var/opt/oracle) from source, and update to destination server.
A. Copy from source server
a. Copy directly (use tar piped over ssh, without saving a tarball on the source host)

# cd /var/opt
# tar cfE - oracle | ssh root@192.168.10.69 "( cd /var/opt; tar xfBp - )"
# cd /export/home
# tar cfE - gridmon oradev oinstall | ssh root@192.168.10.69 "( cd /export/home; tar xfBp - )"


b. Tar the content at /var/tmp and use scp to copy
# cd /export/home/; du -sh gridmon iascan oinstall oracle
# tar -cvf /var/tmp/home_dir.tar gridmon iascan oinstall oracle
# cd /var/opt;# du -sh *
# tar -cvf /var/tmp/var_opt_oracle.tar oracle
# ls -lh /var/tmp/var_opt_oracle.tar /var/tmp/home_dir.tar
# cd /var/tmp; scp var_opt_oracle.tar home_dir.tar sam@192.168.10.69:/var/tmp


B. Update destination server if needed

root@my_ldm_v04:~# vi /etc/passwd
root@my_ldm_v04:~# vi /etc/passwd
root@my_ldm_v04:~# vi /etc/shadow
root@my_ldm_v04:~#
root@my_ldm_v04:~# vi /etc/group
root@my_ldm_v04:~#

# tar -tvf /var/tmp/home_dir.tar | more
# cd /export/home/; tar -xvf /var/tmp/home_dir.tar
# cd /var/opt; tar -xvf /var/tmp/var_opt_oracle.tar
# cd oracle; ls
# df -h

verify user env,
# su - oinstall
[oinstall@my_ldm_v04]>env


15. Update sudoers file
# visudo

make sure you verify source/destination file content.


16. Update vfstab, system, and projects file
# vi /etc/vfstab
make sure you verify source/destination file content.



root@my_ldm_v04:~# cat /etc/system

Review/change your zfs arc size.

set zfs:zfs_arc_max=2147483648
set zfs:zfs_arc_min=67108864
* Setting max file descriptors
set rlim_fd_max=65536
set rlim_fd_cur=1024
* To prevent buffer overflow
* set noexec_user_stack = 1
* set noexec_user_stack_log = 1
* Enable BSM Auditing
set c2audit:audit_load = 1
* Monitor for unauthorized connections of mobile devices
exclude: scsa2usb


Review oracle sga size and update your project file. (GB in size?)
# projects -l

user.oinstall
        projid : 101
        comment: ""
        users  : oinstall
        groups : oinstall
                 dba
        attribs: process.max-sem-nsems=(priv,810,deny)
                 project.max-sem-ids=(priv,512,deny)
                 project.max-shm-ids=(priv,512,deny)
                 project.max-shm-memory=(priv,4294967296,deny)
oracle
        projid : 220
        comment: ""
        users  : oinstall
                 oracle
        groups : oinstall
                 dba
        attribs: process.max-sem-nsems=(priv,256,deny)
                 project.max-sem-ids=(priv,100,deny)
                 project.max-shm-ids=(priv,100,deny)
                 project.max-shm-memory=(priv,4294967296,deny)
root@my_ldm_v04:~#



17. refresh nfs/client
# svcadm enable -r nfs/client


18. Now, time to shutdown source system. stop the source LDOM.
Login to console on destination host and change the IP address info

root@SOL-MG-P02 # ldm stop my_ldm_v05
root@SOL-MG-P02 # ldm stop my_ldm_v04
root@SOL-MG-P02 # ldm unbind my_ldm_v05
root@SOL-MG-P02 # ldm unbind my_ldm_v04

# telnet 0 5014
a. Review existing IP/host info
# ifconfig -a | more
ipmp0: flags=108001000843<UP,BROADCAST,RUNNING,MULTICAST,IPv4,IPMP,PHYSRUNNING> mtu 1500 index 2
        inet 192.168.10.69 netmask ffffff80 broadcast 192.168.10.127
        groupname ipmp0
net0: flags=100001000843<UP,BROADCAST,RUNNING,MULTICAST,IPv4,PHYSRUNNING> mtu 1500 index 3
net1: flags=100061000843<UP,BROADCAST,RUNNING,MULTICAST,IPv4,STANDBY,INACTIVE,PHYSRUNNING> mtu 1500 index 4

# ipadm show-addr
b. Remove old IPMP setting
# ipadm delete-addr ipmp0/v4

c. Assign new IP
# ipadm create-addr -T static -a  192.168.10.33 ipmp0
# svccfg -s identity:node setprop private_data/mapped_addresses=192.168.10.33
# svcadm refresh identity:node
# svcadm restart identity:node
# svcprop identity:node | more

19. Login to control domain and set CPU/MEM
Note: change CPU/Mem while on multi user mode or on power off state.

a. Change CPU/Mem info
root@SOL-MG-P02 # ldm set-vcpu 16 my_ldm_v04_sol11
root@SOL-MG-P02 # ldm set-mem 10G my_ldm_v04_sol11

root@SOL-MG-P02 # ldm set-mem 8G my_ldm-v01
root@SOL-MG-P02 # ldm set-vcpu 16 my_ldm-v01

b. if you are at OK prompt, stop the LDOM and allocate resources.
root@SOL-MG-P02 # ldm stop my_ldm-v01
root@SOL-MG-P02 # ldm set-mem 8G my_ldm-v01


root@SOL-MG-P02 # ldm set-mem 8G my_ldm-v01
root@SOL-MG-P02 # ldm set-vcpu 16 my_ldm-v01
root@SOL-MG-P02 #



20. If you are on multiuser mode, reboot your system.
# init 6 or reboot     (note: on Solaris, init 5 powers the system off; init 6 reboots)

or

if your VM is powered off, Power on your VM and boot the system and login to console

root@SOL-MG-P02 # ldm start my_ldm-v01
root@SOL-MG-P02 # ldm start my_ldm-v01

Login to console
# telnet 0 5014

{0} ok boot


Once you login, check logs, filesystem and login using CAC

root@SOL-MG-P02 # ldm list
NAME             STATE      FLAGS   CONS    VCPU  MEMORY   UTIL  UPTIME
primary          active     -n-cv-  UART    8     8G       1.2%  48d 23h 24m
my_ldm-v01        bound      ------  5013    16    8G
my_ldm_v04_sol11   active     -n----  5014    16    10G      0.1%  1d 7h 3m
my_ldm_v02       active     -n----  5010    32    16G      0.2%  48d 23h 19m
my_ldm_v03       active     -n----  5001    16    8G       1.7%  48d 23h 19m
my_ldm_v05       inactive   ------          16    8G
my_ldm_v04       inactive   ------          16    10


To change IP Address:

# Delete OLD IP Address.
# NOTE: the assignment must be its own statement. In the original one-liner
#   IPADDROBJ=`...` ipadm delete-addr "$IPADDROBJ"
# the variable is only a temporary environment assignment for the ipadm
# command, and "$IPADDROBJ" is expanded by the shell *before* that
# assignment takes effect — so delete-addr received an empty argument.
# Also: the sed pattern '/^ipmp*/' matched "ipm" plus zero-or-more p's;
# the intent is lines beginning with "ipmp" (the IPMP address objects).
echo "Deleting old IP Address" && sleep 1
IPADDROBJ=$(ipadm show-addr | awk '/^ipmp/ {print $1}')
[ -n "$IPADDROBJ" ] && ipadm delete-addr "$IPADDROBJ"

# Assigning NEW IP Address (IP_ADDRESS must be set by the caller)
echo "Assigning New IP Address" && sleep 1; ipadm create-addr -T static -a "$IP_ADDRESS" ipmp0

Update IP Address in SMF:
svccfg -s identity:node setprop private_data/mapped_addresses=192.168.10.41
svcadm refresh identity:node
svcadm restart identity:node

**don't forget to copy over /var/opt/oracle

**don't forget NFS entries
(two separate /etc/vfstab lines — each entry is: device, fsck-device, mount point, fstype, pass, mount-at-boot, options)
SOL-MG-P01:/repository  -  /repository  nfs  -  yes  rw,soft
192.168.10.16:/backup   -  /BACKUP      nfs  -  yes  hard,rw,rsize=32768,wsize=32768,suid,proto=tcp,vers=3

** don't forget to update /etc/hosts,  add #repo entry as well # Repo
192.168.10.11   SOL-MG-P01.expanor.dev    SOL-MG-P01




root@my_ldm_v04:/data#  svcs -a | grep rad
disabled       20:11:26 svc:/system/device/mpxio-upgrade:default
disabled       20:11:29 svc:/network/inetd-upgrade:default
disabled       20:11:29 svc:/system/device/policy-upgrade:default
disabled       20:11:36 svc:/system/rad:remote
online         20:11:30 svc:/network/connectx/unified-driver-post-upgrade:default
online         20:11:31 svc:/system/name-service/upgrade:default
online         20:11:36 svc:/network/eoib/eoib-post-upgrade:default
online         20:11:44 svc:/system/logadm-upgrade:default
online         20:11:45 svc:/system/rad:local
online         20:11:45 svc:/system/rad:local-http
online         20:11:46 svc:/network/physical:upgrade
online         20:11:46 svc:/network/location:upgrade
root@my_ldm_v04:/data# svcadm disable svc:/system/rad:local
root@my_ldm_v04:/data# svcadm disable svc:/system/rad:local-http
root@my_ldm_v04:/data#


root@my_ldm_v04:~#  svcs -a | grep -i rad
disabled       Jun_19   svc:/system/device/mpxio-upgrade:default
disabled       Jun_19   svc:/network/inetd-upgrade:default
disabled       Jun_19   svc:/system/device/policy-upgrade:default
disabled       Jun_19   svc:/system/rad:remote
online         Jun_19   svc:/network/connectx/unified-driver-post-upgrade:default
online         Jun_19   svc:/system/name-service/upgrade:default
online         Jun_19   svc:/network/eoib/eoib-post-upgrade:default
online         Jun_19   svc:/system/rad:local-http
online         Jun_19   svc:/system/rad:local
online         Jun_19   svc:/system/logadm-upgrade:default
online         Jun_19   svc:/network/physical:upgrade
online         Jun_19   svc:/network/location:upgrade
root@my_ldm_v04:~#



#  passwd root
#  passwd sam
#  chmod 700 /export/home/*
#  for i in `ls /export/home/*/.bash_history`; do ls -l $i; done
#  for i in `ls /export/home/*/.bash_history`; do cat /dev/null > $i; done
#  for i in `ls /export/home/*/.bash_history`; do ls -l $i; done
#  for i in `ls /export/home/*/.profile /export/home/*/.bash_profile`; do echo "export HISTCONTROL=ignorespace" >> $i; done
#  for i in `ls /export/home/*/.profile /export/home/*/.bash_profile`; do grep HISTCONTROL $i; done
#  ls -ld /export/home/*
#  svcs -a | grep -i mile
#  uname -a