Wednesday, May 28, 2014

Linux: find HBA information

-bash-3.2$ nsu
Password:
[root@redhat ~]# cd /opt/hp
[root@redhat hp]# ls
hpdiags        hp-ilo       hpsmh             hpssa
hp_fibreutils  hpmouse      hp-smh-templates  hpssacli
hp-health      hp-OpenIPMI  hp-snmp-agents
[root@redhat hp]# cd hp_fibreutils
[root@redhat hp_fibreutils]# ls
adapter_info  hp_rescan  lssd  lssg  scsi_info
[root@redhat hp_fibreutils]# ./hp_rescan -h
NAME

hp_rescan

DESCRIPTION

Sends the rescan signal to all or selected Fibre Channel HBAs/CNAs.

OPTIONS

-a, --all      - Rescan all Fibre Channel HBAs
-h, --help     - Prints this help message
-i, --instance - Rescan a particular instance
-l, --list     - List all supported Fibre Channel HBAs
[root@redhat hp_fibreutils]# ./hp_rescan -l
QLogic adapters:

/sys/class/scsi_host/1
/sys/class/scsi_host/2

Emulex adapters:


Brocade adapters:

[root@redhat hp_fibreutils]# cd /sys/class/scsi_host
[root@redhat scsi_host]# ls
host0  host1  host2
[root@redhat scsi_host]# ls -la
total 0
drwxr-xr-x  5 root root 0 May 23 14:25 .
drwxr-xr-x 43 root root 0 May 23 14:26 ..
drwxr-xr-x  2 root root 0 May 26 11:13 host0
drwxr-xr-x  2 root root 0 May 26 11:13 host1
drwxr-xr-x  2 root root 0 May 26 11:13 host2
[root@redhat scsi_host]# cd host1
[root@redhat host1]# ls
84xx_fw_version   model_desc            state
beacon            model_name            subsystem
cmd_per_lun       mpi_version           total_isp_aborts
device            npiv_vports_inuse     uevent
driver_version    optrom_bios_version   unchecked_isa_dma
fabric_param      optrom_efi_version    unique_id
flash_block_size  optrom_fcode_version  vlan_id
fw_dump_size      optrom_fw_version     vn_port_mac_address
fw_state          pci_info              vport_create
fw_version        phy_version           vport_delete
host_busy         proc_name             zio
isp_id            scan                  zio_timer
isp_name          serial_num
max_npiv_vports   sg_tablesize
[root@redhat host1]# more driver_version
8.04.00.12.5.6-k2
[root@redhat host1]# more model_name
QMH2462
[root@redhat host1]#
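If the WWNs are also needed, the generic fc_host entries under sysfs can usually be read directly (assuming the qla2xxx driver has registered fc_host objects on this kernel):

[root@redhat host1]# cat /sys/class/fc_host/host1/port_name
[root@redhat host1]# cat /sys/class/fc_host/host1/node_name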


Thursday, March 20, 2014

Solaris: nvalias

# format
Searching for disks...done


AVAILABLE DISK SELECTIONS:
       0. c1t0d0
          /pci@0/pci@0/pci@2/scsi@0/sd@0,0
       1. c1t1d0
          /pci@0/pci@0/pci@2/scsi@0/sd@1,0
       2. c4t60060E80056F110000006F11000060D4d0
          /scsi_vhci/ssd@g60060e80056f110000006f11000060d4
Specify disk (enter its number):


{0} ok printenv boot-device
boot-device =           /pci@0/pci@0/pci@2/scsi@0/disk@0,0:a disk net
{0} ok nvalias root-mirr /pci@0/pci@0/pci@2/scsi@0/disk@1,0:a
{0} ok devalias
root-mirr                /pci@0/pci@0/pci@2/scsi@0/disk@1,0:a
ttya                     /ebus@c0/serial@0,ca0000
nvram                    /virtual-devices/nvram@3
net3                     /pci@0/pci@0/pci@1/pci@0/pci@3/network@0,1
net2                     /pci@0/pci@0/pci@1/pci@0/pci@3/network@0
net1                     /pci@0/pci@0/pci@1/pci@0/pci@2/network@0,1
net0                     /pci@0/pci@0/pci@1/pci@0/pci@2/network@0
net                      /pci@0/pci@0/pci@1/pci@0/pci@2/network@0
cdrom                    /pci@0/pci@0/pci@1/pci@0/pci@1/pci@0/usb@0,2/hub@4/device@4/storage@0/disk@0:f
disk3                    /pci@0/pci@0/pci@2/scsi@0/disk@3
disk2                    /pci@0/pci@0/pci@2/scsi@0/disk@2
disk1                    /pci@0/pci@0/pci@2/scsi@0/disk@1
disk0                    /pci@0/pci@0/pci@2/scsi@0/disk@0
disk                     /pci@0/pci@0/pci@2/scsi@0/disk@0
scsi                     /pci@0/pci@0/pci@2/scsi@0
virtual-console          /virtual-devices/console@1
name                     aliases
{0} ok boot
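If the mirror should also be tried automatically when the primary disk fails, the new alias can be added to boot-device as well (a hedged example; adjust the ordering to taste):

{0} ok setenv boot-device disk root-mirr net
boot-device =           disk root-mirr net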

HPUX: How to check hpux ilo firmware version

<>hpiLO-> show
status=0
status_tag=COMMAND COMPLETED


/
  Targets
    system1
    map1
  Properties
  Verbs
    cd version exit show


<>hpiLO-> show /map1

status=0
status_tag=COMMAND COMPLETED


/map1
  Targets
    firmware1
    accounts1
    log1
    enetport1
    dhcpendpt1
    dnsendpt1
    gateway1
    dnsserver1
    dnsserver2
    dnsserver3
    dhcpserver1
    settings1
    config1
    snmp1
    oemhp_dircfg1
    oemhp_vm1
    vlan1
    oemhp_ssocfg1
  Properties
    name=iLO 3 Advanced
    license=3368NY7M2BQGKTNMLDZ8KZLLH
  Verbs
    cd version exit show reset set oemhp_ping



hpiLO-> show firmware1
status=0
status_tag=COMMAND COMPLETED


/map1/firmware1
  Targets
  Properties
    version=1.10
    date=Jul 26 2010
  Verbs
    cd version exit show load set



hpiLO->

to upgrade


How to upgrade the firmware using SSH
Written by Carlo Cacciafesta, Thursday, 07 January 2010

The following commands can be used to upgrade the firmware on an HP iLO or iLO2 interface. This procedure is particularly useful when the iLO stops working properly and it's not accessible from the server or via HTTP(S).

Use the following procedure:

  • Use SSH to access the iLO device
  • Log in to the iLO using administrative credentials
  • Type the following command and press Enter: "load -source http://ip_address/path/imagename.bin /map1/firmware"
        Example: "load -source http://192.168.0.1/ilo2_180.bin /map1/firmware"
  • The interface will download the new image and reset itself
  • To check the firmware version, log in again using SSH and enter: "show /map1/firmware version"

Redhat: Find network device

[root@ devices]# du -a . | grep net
0       ./vmbus_0_0/vmbus_0_4/net:seth0
0       ./pci0000:00/0000:00:0a.0/net:eth0
[root@ devices]# pwd
/sys/devices
[root@ devices]#
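Another way to map interfaces to their underlying devices (assuming sysfs is mounted as usual):

[root@ devices]# ls -l /sys/class/net/
[root@ devices]# readlink -f /sys/class/net/eth0/device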

Solaris: Restricted ftp access

groupadd partsftp
useradd -c "F1 performance project" -d /root/home/partsftp -g partsftp -m -s /usr/bin/false partsftp
svcadm -v enable ftp



Change the user's home directory to /u01/network:

usermod -d /u01/network ftp_user

Then add:

restricted-uid ftp_user

to the /etc/ftpd/ftpaccess file.


server1# vi /etc/shells
"/etc/shells" [New file]
/usr/bin/false



    deny-uid uid-range [uid-range...]
     deny-gid gid-range [gid-range...]
     allow-uid uid-range  [uid-range...]
     allow-gid gid-range [gid-range...]



/etc/ftpd/ftpaccess example
--------------
.
.
.
. omitted

# limit-time    anonymous       30
# limit         anonusers       10      Wk0730-1800       /etc/ftpd/toomany.msg
# limit         anonusers       50      SaSu|Any1800-0730 /etc/ftpd/toomany.msg
restricted-uid partsftp
allow-uid partsftp
deny-uid *
defumask 022
server1#
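A quick way to double-check the setup afterwards (a hedged example; output not shown):

server1# getent passwd partsftp
server1# grep -i partsftp /etc/ftpd/ftpaccess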

Thursday, February 20, 2014

Unix: tcpdump check connectivity

Useful command to run. It captures a single CDP announcement from the upstream switch, which identifies the switch name and port the NIC is cabled to (replace ethX with the interface to test):
tcpdump -nn -v -s 1500 -c 1 'ether[20:2] == 0x2000' -i ethX
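If the switch speaks LLDP rather than CDP, a similar one-shot capture can be done on the LLDP ethertype (a hedged variant, not from the original note):

tcpdump -nn -v -s 1500 -c 1 'ether proto 0x88cc' -i ethX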

Thursday, February 13, 2014

Solaris 10: How to start Oracle 11g with SPFILE in VCS main.cf

Below are the tasks that need to be performed.

1.       Edit the VCS config file main.cf

Remove the Pfile parameter from the ORA_server resource:

Pfile = /opt/oracle/10.2.0.4_EE/dbs/initDS.ora


2.       Start the VCS agent on node1. Clear any FAULT flags on resources.
3.       Start the VCS agent on node2.

4.       Perform a VCS failover test

By default, if no Pfile is defined, VCS will look in $ORACLE_HOME/dbs for the files below, in this sequence:

  • SPFILEsid.ora
  • SPFILE.ora
  • initSID.ora


FYI: hastop -all -force was already run prior to this activity.

from

        Oracle ORA_server (
                Sid = DS
                Owner = oracle
                Home = "/opt/oracle/11.2.0.3_EE"
                Pfile = "/opt/oracle/11.2.0.3_EE/dbs/initDS.ora"
                EnvFile = "/tmp/env.sh"
                MonScript = "/opt/VRTSagents/ha/bin/Oracle/SqlTest.pl"

                )

to

        Oracle ORA_server (
                Sid = DS
                Owner = oracle
                Home = "/opt/oracle/11.2.0.3_EE"
                EnvFile = "/tmp/env.sh"
                MonScript = "/opt/VRTSagents/ha/bin/Oracle/SqlTest.pl"

                )

remember to run
#hacf -verify .
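hacf -verify expects the directory that holds main.cf, so on a default install it is run like this (a small sketch assuming the standard config path):

# cd /etc/VRTSvcs/conf/config
# hacf -verify .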








Friday, March 22, 2013

Solaris: Fibre Channel - device LUN cleanup on Solaris



Procedure to assist in removing Fibre Channel devices (on Solaris 10)
Gathering all the device information for failing devices and formulating the appropriate commands can be cumbersome.  We have an in-house, non-destructive tool to assist in the process; it shows the commands that you should use.  To use the tool, simply run as root:    /install/veritas/vxvm/fc_show

 

Prereq for Veritas:    remove the device from VxFS/VxVM first.

    If devices are under Veritas control:
  • first unmount the affected underlying VxFS filesystems
  • then remove the associated disk from VxVM volume manager: run "vxdisk rm" on the device (see the sketch below)
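A hedged sketch of that prereq, with placeholder mount point, disk group and disk names:

# umount /mnt/appfs                     (unmount the VxFS filesystem on the LUN)
# vxdg -g appdg rmdisk appdg01          (if the disk is still assigned to a disk group)
# vxdisk rm <device>                    (remove the device from VxVM control)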

 

General procedure to remove SAN-attached storage devices

Description
When storage devices that present multiple luns to Solaris[TM] through a Storage Area Network (SAN) have some of those luns removed or made unavailable, Solaris device entries will still exist for those luns. Solaris may then report "missing" or "failing" states for those luns. This document explains how to clean up the device entries and hence remove the error condition caused by Solaris trying to access the unavailable luns. It applies to Solaris 8 and Solaris 9 using the Sun StorEdge[TM] SAN Foundation Kit (SFK), also known as the Leadville driver stack, and is specific to SAN-attached fibre channel storage; it does not apply to direct-attached fibre channel storage.
Steps to Follow
The following commands will be presented:
- cfgadm -c configure [ap_id]
- cfgadm -al -o show_FCP_dev
- cfgadm -o unusable_FCP_dev -c unconfigure [ap_id]
- devfsadm -C
- ("luxadm -e offline " may also be needed)
The following output shows a system with 4 dual-pathed luns which are SAN-attached to a Solaris host:
cfgadm -al -o show_FCP_dev
Ap_Id Type Receptacle Occupant Condition
c2 fc-fabric connected configured unknown
c2::50060e8004274d20,0 disk connected configured unknown
c2::50060e8004274d20,1 disk connected configured unknown
c2::50060e8004274d20,2 disk connected configured unknown
c2::50060e8004274d20,3 disk connected configured unknown
c3 fc-fabric connected configured unknown
c3::50060e8004274d30,0 disk connected configured unknown
c3::50060e8004274d30,1 disk connected configured unknown
c3::50060e8004274d30,2 disk connected configured unknown
c3::50060e8004274d30,3 disk connected configured unknown
format
Searching for disks...done
AVAILABLE DISK SELECTIONS:
(output omitted for clarity)
4. c2t50060E8004274D20d0
/pci@23c,600000/SUNW,qlc@1/fp@0,0/ssd@w50060e8004274d20,0
5. c2t50060E8004274D20d1
/pci@23c,600000/SUNW,qlc@1/fp@0,0/ssd@w50060e8004274d20,1
6. c2t50060E8004274D20d2
/pci@23c,600000/SUNW,qlc@1/fp@0,0/ssd@w50060e8004274d20,2
7. c2t50060E8004274D20d3
/pci@23c,600000/SUNW,qlc@1/fp@0,0/ssd@w50060e8004274d20,3
8. c3t50060E8004274D30d0
/pci@23c,600000/SUNW,qlc@1,1/fp@0,0/ssd@w50060e8004274d30,0
9. c3t50060E8004274D30d1
/pci@23c,600000/SUNW,qlc@1,1/fp@0,0/ssd@w50060e8004274d30,1
10. c3t50060E8004274D30d2
/pci@23c,600000/SUNW,qlc@1,1/fp@0,0/ssd@w50060e8004274d30,2
11. c3t50060E8004274D30d3
/pci@23c,600000/SUNW,qlc@1,1/fp@0,0/ssd@w50060e8004274d30,3
In this example, using the native tools of the storage device, we will remove all of the odd numbered luns. Here the storage is a Sun StorEdge[TM] 9990, so we used Storage Navigator to remove the lun mappings from the host.
The following output shows the same system after the luns have been removed:
cfgadm -al -o show_FCP_dev
Ap_Id Type Receptacle Occupant Condition
c2 fc-fabric connected configured unknown
c2::50060e8004274d20,0 disk connected configured unknown
c2::50060e8004274d20,1 disk connected configured failing
c2::50060e8004274d20,2 disk connected configured unknown
c2::50060e8004274d20,3 disk connected configured failing
c3 fc-fabric connected configured unknown
c3::50060e8004274d30,0 disk connected configured unknown
c3::50060e8004274d30,1 disk connected configured failing
c3::50060e8004274d30,2 disk connected configured unknown
c3::50060e8004274d30,3 disk connected configured failing
format
Searching for disks...done
AVAILABLE DISK SELECTIONS:
(output omitted for clarity)
4. c2t50060E8004274D20d0
/pci@23c,600000/SUNW,qlc@1/fp@0,0/ssd@w50060e8004274d20,0
5. c2t50060E8004274D20d1
/pci@23c,600000/SUNW,qlc@1/fp@0,0/ssd@w50060e8004274d20,1
6. c2t50060E8004274D20d2
/pci@23c,600000/SUNW,qlc@1/fp@0,0/ssd@w50060e8004274d20,2
7. c2t50060E8004274D20d3
/pci@23c,600000/SUNW,qlc@1/fp@0,0/ssd@w50060e8004274d20,3
8. c3t50060E8004274D30d0
/pci@23c,600000/SUNW,qlc@1,1/fp@0,0/ssd@w50060e8004274d30,0
9. c3t50060E8004274D30d1
/pci@23c,600000/SUNW,qlc@1,1/fp@0,0/ssd@w50060e8004274d30,1
10. c3t50060E8004274D30d2
/pci@23c,600000/SUNW,qlc@1,1/fp@0,0/ssd@w50060e8004274d30,2
11. c3t50060E8004274D30d3
/pci@23c,600000/SUNW,qlc@1,1/fp@0,0/ssd@w50060e8004274d30,3
We can now see above that "cfgadm -al -o show_FCP_dev" reports the removed luns as "failing", while format still lists device entries for them.
The first step in removing these devices is to change the state shown in the cfgadm output from "failing" to "unusable". This is done with the following command:
cfgadm -c configure c2 c3
cfgadm -al -o show_FCP_dev
Ap_Id Type Receptacle Occupant Condition
c2 fc-fabric connected configured unknown
c2::50060e8004274d20,0 disk connected configured unknown
c2::50060e8004274d20,1 disk connected configured unusable
c2::50060e8004274d20,2 disk connected configured unknown
c2::50060e8004274d20,3 disk connected configured unusable
c3 fc-fabric connected configured unknown
c3::50060e8004274d30,0 disk connected configured unknown
c3::50060e8004274d30,1 disk connected configured unusable
c3::50060e8004274d30,2 disk connected configured unknown
c3::50060e8004274d30,3 disk connected configured unusable
Possible extra step:
If any devices remain in a "failing" state according to the above output from cfgadm, and they do not move to an "unusable" state after running "cfgadm -c configure" as shown above, then the following command can also be tried:
luxadm -e offline /dev/dsk/c3t50060E8004274D30d3s2
(i.e. "luxadm -e offline <device path>")
Then re-run the previous cfgadm command (cfgadm -al -o show_FCP_dev) to check that the LUN state has changed from "failing" to "unusable". This luxadm command should then be repeated for each LUN which was previously shown in the "failing" state by cfgadm. Then carry on with the process below.
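For several failing LUNs a small shell loop saves typing (device names here are simply the ones from the example above):

for d in c2t50060E8004274D20d1 c2t50060E8004274D20d3 \
         c3t50060E8004274D30d1 c3t50060E8004274D30d3
do
    luxadm -e offline /dev/dsk/${d}s2
done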
--oOo--
Now that the state of the inaccessible luns has been changed to "unusable" in the output from cfgadm, we can remove those entries from the list with the following command:
cfgadm -o unusable_FCP_dev -c unconfigure c2::50060e8004274d20
cfgadm -o unusable_FCP_dev -c unconfigure c3::50060e8004274d30

- If you try to remove a device and get an error like the one below, use the -f (force) flag:
# cfgadm -o unusable_FCP_dev -c unconfigure c3::50060e8004274d30
cfgadm: Library error: failed to offline: /devices/scsi_vhci/ssd@g600015d00005cc00000000000000f166
                    Resource                             Information    
------------------------------------------------  -------------------------
/dev/dsk/c6t600015D00005CC00000000000000F166d0s2  Device being used by VxVM
cfgadm -f -o unusable_FCP_dev -c unconfigure c3::50060e8004274d30

cfgadm -la -o show_FCP_dev
Ap_Id Type Receptacle Occupant Condition
c2 fc-fabric connected configured unknown
c2::50060e8004274d20,0 disk connected configured unknown
c2::50060e8004274d20,2 disk connected configured unknown
c3 fc-fabric connected configured unknown
c3::50060e8004274d30,0 disk connected configured unknown
c3::50060e8004274d30,2 disk connected configured unknown
format
Searching for disks...done
AVAILABLE DISK SELECTIONS:
(output omitted for clarity)
4. c2t50060E8004274D20d0
/pci@23c,600000/SUNW,qlc@1/fp@0,0/ssd@w50060e8004274d20,0
5. c2t50060E8004274D20d2
/pci@23c,600000/SUNW,qlc@1/fp@0,0/ssd@w50060e8004274d20,2
6. c3t50060E8004274D30d0
/pci@23c,600000/SUNW,qlc@1,1/fp@0,0/ssd@w50060e8004274d30,0
7. c3t50060E8004274D30d2
/pci@23c,600000/SUNW,qlc@1,1/fp@0,0/ssd@w50060e8004274d30,2
Now we see that the luns are no longer displayed in the format listing.
Even though the output of the format command looks good, there are still entries for the removed devices in /dev/dsk and /dev/rdsk. These can be removed if desired, by using the devfsadm command.
ls /dev/dsk/c2t50060E8004274D20d*
/dev/dsk/c2t50060E8004274D20d0s0 /dev/dsk/c2t50060E8004274D20d2s4
/dev/dsk/c2t50060E8004274D20d0s1 /dev/dsk/c2t50060E8004274D20d2s5
/dev/dsk/c2t50060E8004274D20d0s2 /dev/dsk/c2t50060E8004274D20d2s6
/dev/dsk/c2t50060E8004274D20d0s3 /dev/dsk/c2t50060E8004274D20d2s7
/dev/dsk/c2t50060E8004274D20d0s4 /dev/dsk/c2t50060E8004274D20d3s0
/dev/dsk/c2t50060E8004274D20d0s5 /dev/dsk/c2t50060E8004274D20d3s1
/dev/dsk/c2t50060E8004274D20d0s6 /dev/dsk/c2t50060E8004274D20d3s2
/dev/dsk/c2t50060E8004274D20d0s7 /dev/dsk/c2t50060E8004274D20d3s3
/dev/dsk/c2t50060E8004274D20d1s0 /dev/dsk/c2t50060E8004274D20d3s4
/dev/dsk/c2t50060E8004274D20d1s1 /dev/dsk/c2t50060E8004274D20d3s5
/dev/dsk/c2t50060E8004274D20d1s2 /dev/dsk/c2t50060E8004274D20d3s6
/dev/dsk/c2t50060E8004274D20d1s3 /dev/dsk/c2t50060E8004274D20d3s7
/dev/dsk/c2t50060E8004274D20d1s4 /dev/dsk/c2t50060E8004274D20d4s0
/dev/dsk/c2t50060E8004274D20d1s5 /dev/dsk/c2t50060E8004274D20d4s1
/dev/dsk/c2t50060E8004274D20d1s6 /dev/dsk/c2t50060E8004274D20d4s2
/dev/dsk/c2t50060E8004274D20d1s7 /dev/dsk/c2t50060E8004274D20d4s3
/dev/dsk/c2t50060E8004274D20d2s0 /dev/dsk/c2t50060E8004274D20d4s4
/dev/dsk/c2t50060E8004274D20d2s1 /dev/dsk/c2t50060E8004274D20d4s5
/dev/dsk/c2t50060E8004274D20d2s2 /dev/dsk/c2t50060E8004274D20d4s6
/dev/dsk/c2t50060E8004274D20d2s3 /dev/dsk/c2t50060E8004274D20d4s7
devfsadm -C
ls /dev/dsk/c2t50060E8004274D20d*
/dev/dsk/c2t50060E8004274D20d0s0 /dev/dsk/c2t50060E8004274D20d2s0
/dev/dsk/c2t50060E8004274D20d0s1 /dev/dsk/c2t50060E8004274D20d2s1
/dev/dsk/c2t50060E8004274D20d0s2 /dev/dsk/c2t50060E8004274D20d2s2
/dev/dsk/c2t50060E8004274D20d0s3 /dev/dsk/c2t50060E8004274D20d2s3
/dev/dsk/c2t50060E8004274D20d0s4 /dev/dsk/c2t50060E8004274D20d2s4
/dev/dsk/c2t50060E8004274D20d0s5 /dev/dsk/c2t50060E8004274D20d2s5
/dev/dsk/c2t50060E8004274D20d0s6 /dev/dsk/c2t50060E8004274D20d2s6
/dev/dsk/c2t50060E8004274D20d0s7 /dev/dsk/c2t50060E8004274D20d2s7



source: http://xteams.oit.ncsu.edu/iso/lun_removal

Thursday, March 21, 2013

Linux/Solaris: set password to non-expiry


Solaris 10
server# passwd -s dmadmin
dmadmin   PS    01/16/13     7    84    28

server# passwd -x -1 dmadmin
passwd: password information changed for dmadmin

server# passwd -s dmadmin
dmadmin   PS

server# grep dmadmin /etc/shadow
dmadmin:aDHLqWdyzszdc:15721::::::
server#

Linux


server:~ # chage -l dmadmin
Minimum:        0
Maximum:        90
Warning:        7
Inactive:       180
Last Change:            Jan 17, 2013
Password Expires:       Apr 17, 2013
Password Inactive:      Oct 14, 2013
Account Expires:        Never

server:~ # chage -m 0 -M 99999 -I -1 -E -1 dmadmin
Aging information changed.

server:~ # chage -l dmadmin
Minimum:        0
Maximum:        99999
Warning:        7
Inactive:       -1
Last Change:            Jan 17, 2013
Password Expires:       Never
Password Inactive:      Never
Account Expires:        Never
server:~ #

Sunday, March 17, 2013

Solaris 10: zfs and nfs shares

By default, the root user on a client machine has restricted access to an NFS-mounted share.




Here's how to grant full access to local root users to NFS mounts:



zfs set sharenfs=rw=@192.168.1.0/24,root=@192.168.1.0/24 space



This gives full access for root users on any machine in the 192.168.1.0/24 subnet to the zfs dataset "space".





serverB# zfs list

NAME USED AVAIL REFER MOUNTPOINT

app 284G 157G 18K none

app/iwstoreAPAC 51.8G 48.2G 51.8G /opt/app/data/iw-store/APAC

app/iwstoreAmericas 27.6G 22.4G 27.6G /opt/app/data/iw-store/Americas

app/iwstoreEMEA 192G 57.6G 192G /opt/app/data/iw-store/EMEA

app/optapp 12.3G 87.7G 12.3G /opt/app



For a whole subnet:

zfs set sharenfs=rw=@153.88.177.0/24,root=@153.88.177.0/24 app/iwstoreAPAC

zfs set sharenfs=rw=@153.88.177.0/24,root=@153.88.177.0/24 app/iwstoreAmericas

zfs set sharenfs=rw=@153.88.177.0/24,root=@153.88.177.0/24 app/iwstoreEMEA





Or for specific IPs:

zfs set sharenfs=rw=153.88.177.59,root=153.88.177.59 app/iwstoreAPAC

zfs set sharenfs=rw=153.88.177.59,root=153.88.177.59 app/iwstoreAmericas

zfs set sharenfs=rw=153.88.177.59,root=153.88.177.59 app/iwstoreEMEA
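To confirm what was actually shared on serverB, the sharenfs property and the live share table can both be checked (a quick verification, not from the original notes):

serverB# zfs get sharenfs app/iwstoreAPAC
serverB# share | grep iw-store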





On serverA

mkdir /serverB_APAC

mkdir /serverB_Americas

mkdir /serverB_EMEA





serverA# dfshares serverB

RESOURCE SERVER ACCESS TRANSPORT

serverB:/opt/app/data/iw-store/Americas serverB - -

serverB:/iwserver serverB - -

serverB:/opt/app/data/iw-store/EMEA serverB - -

serverB:/opt/app/data/iw-store/APAC serverB - -

serverA# mount serverB:/opt/app/data/iw-store/Americas /serverB_Americas

serverA# mount serverB:/opt/app/data/iw-store/EMEA /serverB_EMEA

serverA# df -k

Thursday, February 28, 2013

Solaris: ZFS how to mirror zfs slice not whole disk


ZFS: how to mirror a zpool vdev that is a slice rather than a whole disk. One of the existing vdevs was added by its raw slice path (/dev/rdsk/...s0), so "zpool attach" by the usual cXtYdZ device name fails with "no such device in pool"; the workaround below copies the disk label with prtvtoc/fmthard and then attaches the new slice using the existing vdev's GUID as reported by "zdb -C".

New disks:
c4t60060E80056F110000006F110000804Bd0
c4t60060E80056F110000006F1100006612d0  

serverA# zpool status phtoolvca1
  pool: phtoolvca1
 state: ONLINE
status: The pool is formatted using an older on-disk format.  The pool can
        still be used, but some features are unavailable.
action: Upgrade the pool using 'zpool upgrade'.  Once this is done, the
        pool will no longer be accessible on older software versions.
 scan: none requested
config:

        NAME                                                 STATE     READ WRITE CKSUM
        phtoolvca1                                           ONLINE       0     0     0
          /dev/rdsk/c4t60060E80056F110000006F11000081A6d0s0  ONLINE       0     0     0
          c4t60060E80056F110000006F110000614Ad0              ONLINE       0     0     0

errors: No known data errors
serverA# zpool status | grep c4t60060E80056F110000006F110000804Bd0
serverA# zpool attach -f phtoolvca1 /dev/rdsk/c4t60060E80056F110000006F11000081A6d0s0 c4t60060E80056F110000006F110000804Bd0
cannot attach c4t60060E80056F110000006F110000804Bd0 to /dev/rdsk/c4t60060E80056F110000006F11000081A6d0s0: no such device in pool
serverA# zpool attach -f phtoolvca1 c4t60060E80056F110000006F11000081A6d0s0 c4t60060E80056F110000006F110000804Bd0
cannot attach c4t60060E80056F110000006F110000804Bd0 to c4t60060E80056F110000006F11000081A6d0s0: no such device in pool
serverA# man zppol
serverA#
serverA# prtvtoc /dev/rdsk/c4t60060E80056F110000006F11000081A6d0s2 | fmthard -s - /dev/rdsk/c4t60060E80056F110000006F110000804Bd0s2
fmthard:  New volume table of contents now in place.
serverA#
serverA# zdb -C phtoolvca1

MOS Configuration:
        version: 10
        name: 'phtoolvca1'
        state: 0
        txg: 1516759
        pool_guid: 17149951849739077007
        hostid: 2238627050
        hostname: 'serverA'
        vdev_tree:
            type: 'root'
            id: 0
            guid: 17149951849739077007
            children[0]:
                type: 'disk'
                id: 0
                guid: 8225298048714506169
                path: '/dev/rdsk/c4t60060E80056F110000006F11000081A6d0s0'
                devid: 'id1,ssd@n60060e80056f110000006f11000081a6/a,raw'
                phys_path: '/scsi_vhci/ssd@g60060e80056f110000006f11000081a6:a,raw'
                whole_disk: 1
                metaslab_array: 14
                metaslab_shift: 28
                ashift: 9
                asize: 53674246144
                is_log: 0
                DTL: 104
            children[1]:
                type: 'disk'
                id: 1
                guid: 6410631449240489329
                path: '/dev/dsk/c4t60060E80056F110000006F110000614Ad0s0'
                devid: 'id1,ssd@n60060e80056f110000006f110000614a/a'
                phys_path: '/scsi_vhci/ssd@g60060e80056f110000006f110000614a:a'
                whole_disk: 1
                metaslab_array: 61
                metaslab_shift: 27
                ashift: 9
                asize: 16092758016
                is_log: 0
                DTL: 103
serverA#
serverA# zpool status phtoolvca1
  pool: phtoolvca1
 state: ONLINE
status: The pool is formatted using an older on-disk format.  The pool can
        still be used, but some features are unavailable.
action: Upgrade the pool using 'zpool upgrade'.  Once this is done, the
        pool will no longer be accessible on older software versions.
 scan: none requested
config:

        NAME                                                 STATE     READ WRITE CKSUM
        phtoolvca1                                           ONLINE       0     0     0
          /dev/rdsk/c4t60060E80056F110000006F11000081A6d0s0  ONLINE       0     0     0
          c4t60060E80056F110000006F110000614Ad0              ONLINE       0     0     0

errors: No known data errors
serverA# zpool attach -f phtoolvca1 8225298048714506169 c4t60060E80056F110000006F110000804Bd0s0
serverA# zpool status phtoolvca1
  pool: phtoolvca1
 state: ONLINE
status: One or more devices is currently being resilvered.  The pool will
        continue to function, possibly in a degraded state.
action: Wait for the resilver to complete.
 scan: resilver in progress since Sat Feb 16 05:34:08 2013
    1.56G scanned out of 10.7G at 133M/s, 0h1m to go
    1.55G resilvered, 14.59% done
config:

        NAME                                                   STATE     READ WRITE CKSUM
        phtoolvca1                                             ONLINE       0     0     0
          mirror-0                                             ONLINE       0     0     0
            /dev/rdsk/c4t60060E80056F110000006F11000081A6d0s0  ONLINE       0     0     0
            c4t60060E80056F110000006F110000804Bd0s0            ONLINE       0     0     0  (resilvering)
          c4t60060E80056F110000006F110000614Ad0                ONLINE       0     0     0
serverA# zpool status | grep c4t60060E80056F110000006F1100006612d0
serverA# zpool attach -f phtoolvca1 c4t60060E80056F110000006F110000614Ad0 c4t60060E80056F110000006F1100006612d0
serverA# zpool status phtoolvca1
  pool: phtoolvca1
 state: ONLINE
status: One or more devices is currently being resilvered.  The pool will
        continue to function, possibly in a degraded state.
action: Wait for the resilver to complete.
 scan: resilver in progress since Sat Feb 16 05:36:48 2013
    5.71G scanned out of 10.7G at 487M/s, 0h0m to go
    1.20G resilvered, 53.37% done
config:

        NAME                                                   STATE     READ WRITE CKSUM
        phtoolvca1                                             ONLINE       0     0     0
          mirror-0                                             ONLINE       0     0     0
            /dev/rdsk/c4t60060E80056F110000006F11000081A6d0s0  ONLINE       0     0     0
            c4t60060E80056F110000006F110000804Bd0s0            ONLINE       0     0     0
          mirror-1                                             ONLINE       0     0     0
            c4t60060E80056F110000006F110000614Ad0              ONLINE       0     0     0
            c4t60060E80056F110000006F1100006612d0              ONLINE       0     0     0  (resilvering)

errors: No known data errors
serverA# zpool status phtoolvca1
  pool: phtoolvca1
 state: ONLINE
status: The pool is formatted using an older on-disk format.  The pool can
        still be used, but some features are unavailable.
action: Upgrade the pool using 'zpool upgrade'.  Once this is done, the
        pool will no longer be accessible on older software versions.
 scan: resilvered 2.71G in 0h0m with 0 errors on Sat Feb 16 05:37:21 2013
config:

        NAME                                                   STATE     READ WRITE CKSUM
        phtoolvca1                                             ONLINE       0     0     0
          mirror-0                                             ONLINE       0     0     0
            /dev/rdsk/c4t60060E80056F110000006F11000081A6d0s0  ONLINE       0     0     0
            c4t60060E80056F110000006F110000804Bd0s0            ONLINE       0     0     0
          mirror-1                                             ONLINE       0     0     0
            c4t60060E80056F110000006F110000614Ad0              ONLINE       0     0     0
            c4t60060E80056F110000006F1100006612d0              ONLINE       0     0     0


Wednesday, January 23, 2013

Solaris: copy new disk label


Duplicate the label's content from the boot disk to the mirror disk:

root# prtvtoc /dev/rdsk/c0t0d0s2 | fmthard -s - /dev/rdsk/c1t0d0s2

root# prtvtoc /dev/rdsk/c0t0d0s2 | fmthard -s - /dev/rdsk/c0t1d0s2
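To confirm the copy, the new label can be printed back (an optional check, e.g. for the first mirror):

root# prtvtoc /dev/rdsk/c1t0d0s2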

Wednesday, January 9, 2013

Solaris: move or add new sds device to server


    Look at the source server's /etc/lvm/md.tab file for the metadevice config entry and add it (edited as appropriate) to the new server's md.tab. Then run "metainit d1000", where d1000 is the metadevice.


    old server

    d457 1 1 /dev/dsk/c3t60060E8015320C000001320C00006092d0s0

    new server ( with new metadevice name )

    d1000 1 1 /dev/dsk/c3t60060E8015320C000001320C00006092d0s0
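    Once the entry is in place on the new server, create and check the metadevice (a sketch using the metadevice name from above):

    # metainit d1000
    # metastat d1000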

    Reference

    md.tab File Options

      The following md.tab file options are supported:
      metadevice-name
      When the metainit command is run with a metadevice-name as its only argument, it searches the /etc/lvm/md.tab file to find that name and its corresponding entry. The order in which entries appear in the md.tab file is unimportant. For example, consider the following md.tab entry:

      d0 2 1 c1t0d0s0 1 c2t1d0s0
      When you run the command metainit d0, it configures metadevice d0 based on the configuration information found in the md.tab file.
      -a
      Activates all metadevices defined in the md.tab file.
      metainit does not maintain the state of the volumes that would have been created when metainit is run with both the -a and -n flags. If a device d0 is created in the first line of the md.tab file, and a later line in md.tab assumes the existence of d0, the later line fails when metainit -an runs (even if it would succeed with metainit -a).

Thursday, September 20, 2012

Solaris: modify cpu pool


# poolcfg -dc 'modify pset pset_ACC_INT_A_3 ( uint pset.min = 0 ; uint pset.max = 0)'
# poolcfg -dc 'modify pset pset_ACC_INT_A_4 ( uint pset.min = 0 ; uint pset.max = 0)'
# poolcfg -dc 'modify pset pset_ACC_INT_A_6 ( uint pset.min = 0 ; uint pset.max = 0)'

# poolcfg -dc 'transfer 2 from pset pset_ACC_INT_A_3 to pset_default'
# poolcfg -dc 'transfer 2 from pset pset_ACC_INT_A_4 to pset_default'
# poolcfg -dc 'transfer 2 from pset pset_ACC_INT_A_6 to pset_default'
# pooladm -c
# poolcfg -dc info


Change the pset number for cpupool below

pset_ACC_INT_A_1
pset_ACC_INT_A_2
pset_ACC_INT_A_5


# poolcfg -dc 'modify pset pset_ACC_INT_A_1 ( uint pset.min = 8 ; uint pset.max = 8)'
# poolcfg -dc 'modify pset pset_ACC_INT_A_2 ( uint pset.min = 4 ; uint pset.max = 4)'
# poolcfg -dc 'modify pset pset_ACC_INT_A_5 ( uint pset.min = 8 ; uint pset.max = 8)'
# poolcfg -dc 'transfer 4 from pset pset_default to pset_ACC_INT_A_1'
# poolcfg -dc 'transfer 2 from pset pset_default to pset_ACC_INT_A_2'
# poolcfg -dc 'transfer 6 from pset pset_default to pset_ACC_INT_A_5'
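To confirm the new processor-set sizes after the transfers, the dynamic configuration and pool statistics can be checked (an optional verification step):

# poolcfg -dc info
# poolstat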

Tuesday, July 10, 2012

Solaris: VCS how to update SystemList without bringing down the service group


Question


I have a service group say sg1 and it has the following system list defined

SystemList = { a = 1, b = 2, c = 3 }
        AutoStartList = { a }

Currently, I am running service group in c. I want to change the system
order like this. a=3, b=2 and c=1 and autostartlist=c.


Can I do this without bringing down the service group?


Answer

haconf -makerw
hagrp -modify sg1 SystemList -update c 0 b 1 a 2
hagrp -modify sg1 AutoStartList c
haconf -dump -makero
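To confirm the change took effect (sg1 being the example group name from the question):

hagrp -display sg1 -attribute SystemList
hagrp -display sg1 -attribute AutoStartList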

source: http://mailman.eng.auburn.edu/pipermail/veritas-ha/2004-June/008414.html


Wednesday, May 9, 2012

Solaris: check free memory



Commands to check free memory on unix servers -

(1) vmstat 1 2 | tail -1 | awk '{printf "%d%s\n", ($5*4)/1024, "MB" }'

(2) top -h -d 1


Friday, April 6, 2012

Solaris: Zone capped memory

Add a memory cap.


zonecfg:zoneA> add capped-memory
Set the memory cap.


zonecfg:zoneA:capped-memory> set physical=50m
Set the swap memory cap.


zonecfg:zoneA:capped-memory> set swap=100m
Set the locked memory cap.


zonecfg:zoneA:capped-memory> set locked=30m
End the memory cap specification.


zonecfg:zoneA:capped-memory> end
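Putting the steps together, a full session for an existing zone might look like this (zoneA and the sizes are just the example values above; the new caps generally apply from the next zone boot):

global# zonecfg -z zoneA
zonecfg:zoneA> add capped-memory
zonecfg:zoneA:capped-memory> set physical=50m
zonecfg:zoneA:capped-memory> set swap=100m
zonecfg:zoneA:capped-memory> set locked=30m
zonecfg:zoneA:capped-memory> end
zonecfg:zoneA> commit
zonecfg:zoneA> exit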

Thursday, March 15, 2012

Solaris: Find out disks CU:LDev and /dev/rdsk/

# more xpinfo.out

Device File : /dev/rdsk/c1t50060E80056F1168d0s2        Model : XP24000
       Port : CL7J                                  Serial # : 00028433
Host Target : c38c                                  Code Rev : 6006
  Array LUN : 00                                   Subsystem : 0085
    CU:LDev : 81:6c                                 CT Group : ---
       Type : OPEN-V      -SUN                     CA Volume : SMPL
       Size : 51200 MB                            BC0 (MU#0) : SMPL
       ALPA : 6d                                  BC1 (MU#1) : SMPL
    Loop Id : 43                                  BC2 (MU#2) : SMPL
    SCSI Id : ---
 RAID Level : TPVOL                               RAID Type  : ---
 RAID Group : ---                                   ACP Pair : ---
 Disk Mechs : ---     ---     ---     ---
     FC-LUN : 00006f110000816c                      Port WWN : 50060e80056f1168
HBA Node WWN: 2000001b329c0eb8                   HBA Port WWN: 2100001b329c0eb8
 Vol Group  : ---                                Vol Manager : ---
Mount Points: ---
  DMP Paths : ---
  SLPR : 0                                         CLPR : 0

Device File : /dev/rdsk/c1t50060E80056F1168d1s2        Model : XP24000
# luxadm display /dev/rdsk/c1t50060E80056F1168d0s2
DEVICE PROPERTIES for disk: /dev/rdsk/c1t50060E80056F1168d0s2
  Vendor:               HP
  Product ID:           OPEN-V      -SUN
  Revision:             6006
  Serial Num:           50 06F11816C   <---- ends with the CU:LDev (81:6c)
  Unformatted capacity: 51200.625 MBytes
  Write Cache:          Enabled
  Read Cache:           Enabled
    Minimum prefetch:   0x0
    Maximum prefetch:   0x0
  Device Type:          Disk device
  Path(s):

  /dev/rdsk/c1t50060E80056F1168d0s2
  /devices/pci@2,600000/SUNW,qlc@0/fp@0,0/ssd@w50060e80056f1168,0:c,raw
    LUN path port WWN:          50060e80056f1168
    Host controller port WWN:   2100001b329c0eb8
    Path status:                O.K.
  /dev/rdsk/c2t50060E80056F1178d0s2
  /devices/pci@3,700000/SUNW,qlc@0/fp@0,0/ssd@w50060e80056f1178,0:c,raw
    LUN path port WWN:          50060e80056f1178
    Host controller port WWN:   2100001b329c55ab
    Path status:                O.K.

Tuesday, February 21, 2012

Solaris: VxVM determine a disk's multipathing

cluster01# vxdisk list
DEVICE       TYPE            DISK         GROUP        STATUS
disk_0       auto:SVM        -            -            SVM
disk_1       auto:SVM        -            -            SVM
xp24k0_6bc4  auto:cdsdisk    egate_dg01   egate_dg     online thin nohotuse
xp24k0_6bc5  auto:cdsdisk    eaicore_dg01  eaicore_dg   online thin nohotuse
xp24k0_6bc6  auto:cdsdisk    -            -            online thin
xp24k0_6bc7  auto:cdsdisk    optora_dg01  optora_dg    online thin
xp24k0_6bf0  auto:cdsdisk    eaiuser1_dg01  eaiuser1_dg  online thin
xp24k0_6bfa  auto:cdsdisk    eaiuser11_dg01  eaiuser11_dg online thin
xp24k0_6bfb  auto:cdsdisk    eaiuser12_dg01  eaiuser12_dg online thin
xp24k0_6bfc  auto:cdsdisk    eaiuser13_dg01  eaiuser13_dg online thin
xp24k0_6bf1  auto:cdsdisk    -            -            online thin
xp24k0_6bf2  auto:cdsdisk    eaiuser3_dg01  eaiuser3_dg  online thin
xp24k0_6bf3  auto:cdsdisk    eaiuser4_dg01  eaiuser4_dg  online thin
xp24k0_6bf4  auto:cdsdisk    eaiuser5_dg01  eaiuser5_dg  online thin
xp24k0_6bf5  auto:cdsdisk    -            -            online thin
xp24k0_6bf6  auto:cdsdisk    -            -            online thin
xp24k0_6bf7  auto:cdsdisk    -            -            online thin
xp24k0_6bf8  auto:cdsdisk    -            -            online thin
xp24k0_6bf9  auto:cdsdisk    -            -            online thin
xp24k0_6b8b  auto:cdsdisk    oraeai2_dg01  oraeai2_dg   online thin
xp24k0_6b8c  auto:cdsdisk    oraeai1_dg01  oraeai1_dg   online thin
xp24k0_6b8d  auto:cdsdisk    oraeai2_dg02  oraeai2_dg   online thin
xp24k0_6b8e  auto:cdsdisk    oraeai1_dg02  oraeai1_dg   online thin
xp24k0_6b8f  auto:cdsdisk    oraeai2_dg03  oraeai2_dg   online thin
xp24k0_6b41  auto:none       -            -            online invalid
xp24k0_6b90  auto:cdsdisk    oraeai1_dg03  oraeai1_dg   online thin
xp24k0_6b91  auto:cdsdisk    oraeai1_dg04  oraeai1_dg   online thin
xp24k0_6b92  auto:cdsdisk    oraeai1_dg05  oraeai1_dg   online thin
xp24k0_6b93  auto:cdsdisk    mqha_dg01    mqha_dg      online thin nohotuse
xp24k0_6b94  auto:cdsdisk    oraeai2_dg04  oraeai2_dg   online thin
xp24k0_6b95  auto:cdsdisk    oraeai2_dg05  oraeai2_dg   online thin
xp24k0_6b96  auto:cdsdisk    -            -            online thin
xp24k0_6b97  auto:cdsdisk    -            -            online thin
xp24k0_6b98  auto:cdsdisk    -            -            online thin
xp24k0_6c4a  auto:cdsdisk    eaiuser4_dg02  eaiuser4_dg  online thin
xp24k0_6c4b  auto:cdsdisk    -            -            online thin
xp24k0_6c4c  auto:cdsdisk    -            -            online thin
xp24k0_6c4d  auto:cdsdisk    eaiuser11_dg02  eaiuser11_dg online thin
xp24k0_6c4e  auto:cdsdisk    oraeai1_dg06  oraeai1_dg   online thin
xp24k0_6c4f  auto:cdsdisk    oraeai2_dg06  oraeai2_dg   online thin
xp24k0_62ce  auto:cdsdisk    oraeai2_dg07  oraeai2_dg   online thin nohotuse
xp24k0_66ef  auto:cdsdisk    oraeai2_dg09  oraeai2_dg   online thin nohotuse
xp24k0_66f0  auto:cdsdisk    oraeai2_dg10  oraeai2_dg   online thin nohotuse
xp24k0_615f  auto:cdsdisk    oraeai1_dg07  oraeai1_dg   online thin
xp24k0_834b  auto:cdsdisk    seebeyondvcp2_rootdg01  seebeyondvcp2_rootdg online thin nohotuse
xp24k0_6336  auto:cdsdisk    oraeai2_dg08  oraeai2_dg   online thin
cluster011# vxdisk list xp24k0_6c4d
Device:    xp24k0_6c4d
devicetag: xp24k0_6c4d
type:      auto
hostid:    cluster01
disk:      name=eaiuser11_dg02 id=1280996984.101.cluster01
group:     name=eaiuser11_dg id=1279714356.151.cluster01
info:      format=cdsdisk,privoffset=256,pubslice=2,privslice=2
flags:     online ready private autoconfig noautoimport imported thin
pubpaths:  block=/dev/vx/dmp/xp24k0_6c4ds2 char=/dev/vx/rdmp/xp24k0_6c4ds2
guid:      {99bcadaa-a06b-11df-8fe1-0021286cbf2e}
udid:      HP%5F50%5F1320C%5F50%201320C6C4D
site:      -
version:   3.1
iosize:    min=512 (bytes) max=2048 (blocks)
public:    slice=2 offset=65792 len=83799808 disk_offset=0
private:   slice=2 offset=256 len=65536 disk_offset=0
update:    time=1325053781 seqno=0.183
ssb:       actual_seqno=0.0
headers:   0 240
configs:   count=1 len=48144
logs:      count=1 len=7296
Defined regions:
 config   priv 000048-000239[000192]: copy=01 offset=000000 enabled
 config   priv 000256-048207[047952]: copy=01 offset=000192 enabled
 log      priv 048208-055503[007296]: copy=01 offset=000000 enabled
 lockrgn  priv 055504-055647[000144]: part=00 offset=000000
Multipathing information:
numpaths:   2
c1t50060E8015320C4Dd34s2        state=enabled
c2t50060E8015320C5Dd34s2        state=enabled
cluster01#



cluster01# vxdmpadm getdmpnode nodename=c1t50060E8015320C4Dd34s2
NAME                 STATE     ENCLR-TYPE   PATHS  ENBL  DSBL  ENCLR-NAME
=========================================================================
xp24k0_6c4d           ENABLED   Disk         2      2     0     Disk
************************************************************************


Other useful commands:
cluster01# vxdmpadm getsubpaths ctlr=c1
cluster01# vxdmpadm getsubpaths dmpnodename=xp24k0_6c4d

Tuesday, December 6, 2011

UNIX: Monitoring pipe activity with pv

$ dd if=/dev/zero | pv > foo

522MB 0:00:06 [ 109MB/s] [ <=> ]

When pv is added to the pipeline, you get a continuous display of the amount of data that is being transferred between two pipe endpoints. I really dig this utility, and I am stoked that I found the catonmat website! Niiiiiiiiice!

UPDATE:

Try using pv when sending stuff over the network using dd. Neato.

[root@machine2 ~]# ssh machine1 "dd if=/dev/VolGroup00/domU2migrate"|pv -s 8G -petr|dd of=/dev/xen02vg/domU2migrate
0:00:30 [11.2MB/s] [====> ] 4% ETA :10:13

Want to rate limit the transfer so you don’t flood the pipe?

-L RATE, --rate-limit RATE
Limit the transfer to a maximum of RATE bytes per second. A suffix of "k", "m", "g", or "t" can be added to denote kilobytes (*1024), megabytes, and so on.

-B BYTES, --buffer-size BYTES
Use a transfer buffer size of BYTES bytes. A suffix of "k", "m", "g", or "t" can be added to denote kilobytes (*1024), megabytes, and so on. The default buffer size is the block size of the input file's filesystem multiplied by 32 (512kb max), or 400kb if the block size cannot be determined.

Already have a transfer in progress and want to rate limit it without restarting?

-R PID, --remote PID
If PID is an instance of pv that is already running, -R PID will cause that instance to act as though it had been given this instance's command line instead. For example, if pv -L 123k is running with process ID 9876, then running pv -R 9876 -L 321k will cause it to start using a rate limit of 321k instead of 123k. Note that some options cannot be changed while running, such as -c and -l.
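Two quick examples of the rate-limit options (hypothetical values and PID):

$ dd if=/dev/zero | pv -L 10m > /dev/null        # start a transfer capped at 10MB/s
$ pv -R 9876 -L 321k                             # re-limit the already-running pv with PID 9876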

Solaris 10: Adding a file system to a running zone

Since the global zone uses loopback mounts to present file systems to zones, adding a new file system was as easy as loopback mounting the file system into the zone’s file system:

$ mount -F lofs /filesystems/zone1oracle03 /zones/zone1/root/ora03

Once the file system was mounted, I added it to the zone configuration and then verified it was mounted:

$ mount | grep ora03
/filesystems/zone1oracle03 on filesystems/zone1oracle03 read/write/setuid/devices/nonbmand/exec/xattr/atime/dev=2d9000b on Sun Apr 12 10:43:19 2009
/zones/zone1/root/ora03 on /filesystems/zone1oracle03 read/write/setuid/devices/dev=2d9000b on Sun Apr 12 10:44:07 2009
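For reference, the zone configuration change mentioned above might look like this (a sketch using the paths from the example, not the author's exact session):

global# zonecfg -z zone1
zonecfg:zone1> add fs
zonecfg:zone1:fs> set dir=/ora03
zonecfg:zone1:fs> set special=/filesystems/zone1oracle03
zonecfg:zone1:fs> set type=lofs
zonecfg:zone1:fs> end
zonecfg:zone1> commit
zonecfg:zone1> exit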



With ZFS filesystem (mountpoint=legacy):
mount -F zfs zpool/fs /path/to/zone/root/fs

Linux: remount read only file system

$ mount -o remount,rw /

Once you can write to the file system you should be able to write out changes to the file system to correct the issue that prevented the server from booting. Viva la remount!

Solaris: coreadm core file management

Using the Solaris coreadm utility to control core file generation

Solaris has shipped with the coreadm utility for quite some time, and this nifty little utility allows you to control every facet of core file generation. This includes the ability to control where core files are written, the name of core files, which portions of the process's address space will be written to the core file, and my favorite option, whether or not to generate a syslog entry indicating that a core file was generated.
To begin using coreadm, you will first need to run it with the "-g" option to specify where core files should be stored, and the pattern that should be used when creating the core file:
coreadm -g /var/core/core.%f.%p
Once a directory and file pattern are specified, you can optionally adjust which portions of the process's address space (e.g., text segment, heap, ISM, etc.) will be written to the core file. To ease debugging, I like to configure coreadm to dump everything with the "-G all" option:
coreadm -G all
Since core files are typically created at odd working hours, I also like to configure coreadm to log messages to syslog indicating that a core file was created. This can be done by using the coreadm "-e log" option:
coreadm -e log
After these settings are adjusted, the coreadm "-e global" option can be used to enable global core file generation, and the coreadm utility can be run without any arguments to view the settings (which are stored in /etc/coreadm.conf):
coreadm -e global
coreadm
global core file pattern: /var/core/core.%f.%p
     global core file content: all
       init core file pattern: core
       init core file content: default
            global core dumps: enabled
       per-process core dumps: enabled
      global setid core dumps: disabled
 per-process setid core dumps: disabled
     global core dump logging: enabled
Once global core file support is enabled, each time a process receives a deadly signal (e.g., SIGSEGV, SIGBUS, etc.):
$ kill -SIGSEGV 4652
A core file will be written to /var/core:
ls -al /var/core/*4652
-rw-------   1 root     root     4163953 Mar  9 11:51 /var/core/core.inetd.4652
And a message similar to the following will appear in the system log:
Mar 9 11:51:48 fubar genunix: [ID 603404 kern.notice] NOTICE: core_log: inetd[4652] core dumped: /var/core/core.inetd.4652
This is an amazingly useful feature, and can greatly simplify root causing software problems.