Wednesday, December 24, 2008

Solaris 10 HP-OPEN-V-SUN

root@hostname # cfgadm -o show_FCP_dev -al
Ap_Id Type Receptacle Occupant Condition
c2 fc-fabric connected configured unknown
c2::50060e8004f25e46,0 disk connected configured unknown
c2::50060e8004f25e46,1 disk connected configured unknown
c2::50060e8004f25e46,2 disk connected configured unknown
c2::50060e8004f25e46,3 disk connected configured unknown
c2::50060e8004f25e46,4 disk connected configured unknown
c2::50060e8004f25e46,5 disk connected configured unknown
c2::50060e8004f25e46,6 disk connected configured unknown
c2::50060e8004f25e46,7 disk connected configured unknown
c2::50060e8004f25e46,8 disk connected configured unknown
c2::50060e8004f25e46,9 disk connected configured unknown
c2::50060e8004f25e46,10 disk connected configured unknown
c2::50060e8004f25e46,11 disk connected configured unknown
c2::50060e8004f25e46,12 disk connected configured unknown
c2::50060e8004f25e46,13 disk connected configured unknown
c2::50060e8004f25e46,14 disk connected configured unknown
c2::50060e8004f25e46,15 disk connected configured unknown
c2::50060e8004f25e46,16 disk connected configured unknown
c2::50060e8004f25e46,17 disk connected configured unknown
c2::50060e8004f25e46,18 disk connected configured unknown
c2::50060e8004f25e46,19 disk connected configured unknown
c2::50060e8004f25e46,20 disk connected configured unknown
c2::50060e8004f25e46,21 disk connected configured unknown
c2::50060e8004f25e46,22 disk connected configured unknown
c2::50060e8004f25e46,23 disk connected configured unknown
c2::50060e8004f25e46,24 disk connected configured unknown
c2::50060e8004f25e46,25 disk connected configured unknown
c2::50060e8004f25e46,26 disk connected configured unknown
c2::50060e8004f25e46,27 disk connected configured unknown
c2::50060e8004f25e46,28 disk connected configured unknown
c2::50060e8004f25e46,29 disk connected configured unknown
c2::50060e8004f25e46,30 disk connected configured unknown
c2::50060e8004f25e46,31 disk connected configured unknown
c2::50060e8004f25e46,32 disk connected configured unknown
c2::50060e8004f25e46,33 disk connected configured unknown
c2::50060e8004f25e46,34 disk connected configured unknown
c2::50060e8004f25e46,35 disk connected configured unknown
c2::50060e8004f25e46,36 disk connected configured unknown
c2::50060e8004f25e46,37 disk connected configured unknown
c2::50060e8004f25e46,38 disk connected configured unknown
c2::50060e8004f25e46,39 disk connected configured unknown
c2::50060e8004f25e46,40 disk connected configured unknown
c2::50060e8004f25e46,41 disk connected configured unknown
c2::50060e8004f25e46,42 disk connected configured unknown
c2::50060e8004f25e46,43 disk connected configured unknown
c2::50060e8004f25e46,44 disk connected configured unknown
c2::50060e8004f25e46,45 disk connected configured unknown
c2::50060e8004f25e46,46 disk connected configured unknown
c2::50060e8004f25e46,47 disk connected configured unknown
c2::50060e8004f25e46,48 disk connected configured unknown
c2::50060e8004f25e46,49 disk connected configured unknown
c2::50060e8004f25e46,50 disk connected configured unknown
c2::50060e8004f25e46,51 disk connected configured unknown
c2::50060e8004f25e46,52 disk connected configured unknown
c2::50060e8004f25e46,53 disk connected configured unknown
c2::50060e8004f25e46,54 disk connected configured unknown
c2::50060e8004f25e46,55 disk connected configured unknown
c2::50060e8004f25e46,56 disk connected configured unknown
c2::50060e8004f25e46,57 disk connected configured unknown
c2::50060e8004f25e46,58 disk connected configured unknown
c2::50060e8004f25e46,59 disk connected configured unknown
c2::50060e8004f25e46,60 disk connected configured unknown
c2::50060e8004f25e46,61 disk connected configured unknown
c2::50060e8004f25e46,62 disk connected configured unknown
c2::50060e8004f25e46,63 disk connected configured unknown
c2::50060e8004f25e46,64 disk connected configured unknown
c2::50060e8004f25e46,65 disk connected configured unknown
c3 fc-fabric connected configured unknown
c3::50060e8004f25e56,0 disk connected configured unknown
c3::50060e8004f25e56,1 disk connected configured unknown
c3::50060e8004f25e56,2 disk connected configured unknown
c3::50060e8004f25e56,3 disk connected configured unknown
c3::50060e8004f25e56,4 disk connected configured unknown
c3::50060e8004f25e56,5 disk connected configured unknown
c3::50060e8004f25e56,6 disk connected configured unknown
c3::50060e8004f25e56,7 disk connected configured unknown
c3::50060e8004f25e56,8 disk connected configured unknown
c3::50060e8004f25e56,9 disk connected configured unknown
c3::50060e8004f25e56,10 disk connected configured unknown
c3::50060e8004f25e56,11 disk connected configured unknown
c3::50060e8004f25e56,12 disk connected configured unknown
c3::50060e8004f25e56,13 disk connected configured unknown
c3::50060e8004f25e56,14 disk connected configured unknown
c3::50060e8004f25e56,15 disk connected configured unknown
c3::50060e8004f25e56,16 disk connected configured unknown
c3::50060e8004f25e56,17 disk connected configured unknown
c3::50060e8004f25e56,18 disk connected configured unknown
c3::50060e8004f25e56,19 disk connected configured unknown
c3::50060e8004f25e56,20 disk connected configured unknown
c3::50060e8004f25e56,21 disk connected configured unknown
c3::50060e8004f25e56,22 disk connected configured unknown
c3::50060e8004f25e56,23 disk connected configured unknown
c3::50060e8004f25e56,24 disk connected configured unknown
c3::50060e8004f25e56,25 disk connected configured unknown
c3::50060e8004f25e56,26 disk connected configured unknown
c3::50060e8004f25e56,27 disk connected configured unknown
c3::50060e8004f25e56,28 disk connected configured unknown
c3::50060e8004f25e56,29 disk connected configured unknown
c3::50060e8004f25e56,30 disk connected configured unknown
c3::50060e8004f25e56,31 disk connected configured unknown
c3::50060e8004f25e56,32 disk connected configured unknown
c3::50060e8004f25e56,33 disk connected configured unknown
c3::50060e8004f25e56,34 disk connected configured unknown
c3::50060e8004f25e56,35 disk connected configured unknown
c3::50060e8004f25e56,36 disk connected configured unknown
c3::50060e8004f25e56,37 disk connected configured unknown
c3::50060e8004f25e56,38 disk connected configured unknown
c3::50060e8004f25e56,39 disk connected configured unknown
c3::50060e8004f25e56,40 disk connected configured unknown
c3::50060e8004f25e56,41 disk connected configured unknown
c3::50060e8004f25e56,42 disk connected configured unknown
c3::50060e8004f25e56,43 disk connected configured unknown
c3::50060e8004f25e56,44 disk connected configured unknown
c3::50060e8004f25e56,45 disk connected configured unknown
c3::50060e8004f25e56,46 disk connected configured unknown
c3::50060e8004f25e56,47 disk connected configured unknown
c3::50060e8004f25e56,48 disk connected configured unknown
c3::50060e8004f25e56,49 disk connected configured unknown
c3::50060e8004f25e56,50 disk connected configured unknown
c3::50060e8004f25e56,51 disk connected configured unknown
c3::50060e8004f25e56,52 disk connected configured unknown
c3::50060e8004f25e56,53 disk connected configured unknown
c3::50060e8004f25e56,54 disk connected configured unknown
c3::50060e8004f25e56,55 disk connected configured unknown
c3::50060e8004f25e56,56 disk connected configured unknown
c3::50060e8004f25e56,57 disk connected configured unknown
c3::50060e8004f25e56,58 disk connected configured unknown
c3::50060e8004f25e56,59 disk connected configured unknown
c3::50060e8004f25e56,60 disk connected configured unknown
c3::50060e8004f25e56,61 disk connected configured unknown
c3::50060e8004f25e56,62 disk connected configured unknown
c3::50060e8004f25e56,63 disk connected configured unknown
c3::50060e8004f25e56,64 disk connected configured unknown
c3::50060e8004f25e56,65 disk connected configured unknown
root@hostname # pwd
/root/home/hpfimhas
root@hostname # cfgadm -o show_FCP_dev -al > cfgadm_o_show_FCP_dev_al.out.07112008
root@hostname # ls
cfgadm_o_show_FCP_dev_al.out.07112008 format.out.061108
df_k.out.061108 metastat_p.out.061108
root@hostname # uname -a
SunOS hostname 5.10 Generic_118833-36 sun4v sparc SUNW,Sun-Fire-T200
root@hostname # cfgadm -c configure c2
root@hostname # cfgadm -c configure c3
root@hostname # tail /var/adm/messages
Nov 7 02:54:14 hostname genunix: [ID 834635 kern.info] /scsi_vhci/ssd@g60060e8004f25e000000f25e00000dd7 (ssd22) multipath status: optimal, path /pci@7c0/pci@0/pci@9/SUNW,qlc@0/fp@0,0 (fp0) to target address: w50060e8004f25e56,9 is online Load balancing: round-robin
Nov 7 02:54:14 hostname genunix: [ID 834635 kern.info] /scsi_vhci/ssd@g60060e8004f25e000000f25e00000dd6 (ssd23) multipath status: optimal, path /pci@7c0/pci@0/pci@9/SUNW,qlc@0/fp@0,0 (fp0) to target address: w50060e8004f25e56,8 is online Load balancing: round-robin
Nov 7 02:54:14 hostname genunix: [ID 834635 kern.info] /scsi_vhci/ssd@g60060e8004f25e000000f25e00000dd5 (ssd24) multipath status: optimal, path /pci@7c0/pci@0/pci@9/SUNW,qlc@0/fp@0,0 (fp0) to target address: w50060e8004f25e56,7 is online Load balancing: round-robin
Nov 7 02:54:14 hostname genunix: [ID 834635 kern.info] /scsi_vhci/ssd@g60060e8004f25e000000f25e00000dd4 (ssd25) multipath status: optimal, path /pci@7c0/pci@0/pci@9/SUNW,qlc@0/fp@0,0 (fp0) to target address: w50060e8004f25e56,6 is online Load balancing: round-robin
Nov 7 02:54:14 hostname genunix: [ID 834635 kern.info] /scsi_vhci/ssd@g60060e8004f25e000000f25e00000719 (ssd0) multipath status: optimal, path /pci@7c0/pci@0/pci@9/SUNW,qlc@0/fp@0,0 (fp0) to target address: w50060e8004f25e56,5 is online Load balancing: round-robin
Nov 7 02:54:14 hostname genunix: [ID 834635 kern.info] /scsi_vhci/ssd@g60060e8004f25e000000f25e00000718 (ssd1) multipath status: optimal, path /pci@7c0/pci@0/pci@9/SUNW,qlc@0/fp@0,0 (fp0) to target address: w50060e8004f25e56,4 is online Load balancing: round-robin
Nov 7 02:54:14 hostname genunix: [ID 834635 kern.info] /scsi_vhci/ssd@g60060e8004f25e000000f25e00000717 (ssd2) multipath status: optimal, path /pci@7c0/pci@0/pci@9/SUNW,qlc@0/fp@0,0 (fp0) to target address: w50060e8004f25e56,3 is online Load balancing: round-robin
Nov 7 02:54:14 hostname genunix: [ID 834635 kern.info] /scsi_vhci/ssd@g60060e8004f25e000000f25e00000716 (ssd3) multipath status: optimal, path /pci@7c0/pci@0/pci@9/SUNW,qlc@0/fp@0,0 (fp0) to target address: w50060e8004f25e56,2 is online Load balancing: round-robin
Nov 7 02:54:14 hostname genunix: [ID 834635 kern.info] /scsi_vhci/ssd@g60060e8004f25e000000f25e00000715 (ssd4) multipath status: optimal, path /pci@7c0/pci@0/pci@9/SUNW,qlc@0/fp@0,0 (fp0) to target address: w50060e8004f25e56,1 is online Load balancing: round-robin
Nov 7 02:54:14 hostname genunix: [ID 834635 kern.info] /scsi_vhci/ssd@g60060e8004f25e000000f25e00000714 (ssd5) multipath status: optimal, path /pci@7c0/pci@0/pci@9/SUNW,qlc@0/fp@0,0 (fp0) to target address: w50060e8004f25e56,0 is online Load balancing: round-robin
root@hostname #
root@hostname # grep -i label /var/adm/messages
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:43 hostname Corrupt label; wrong magic number
Nov 7 01:56:44 hostname Corrupt label; wrong magic number
Nov 7 01:56:44 hostname Corrupt label; wrong magic number
Nov 7 01:56:44 hostname Corrupt label; wrong magic number
Nov 7 01:56:44 hostname Corrupt label; wrong magic number
root@hostname # echo "\n" | format > format.today

root@hostname # more format.today
Searching for disks...done

c4t60060E8004F25E000000F25E00000F0Ed0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F0Fd0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F1Ad0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F1Bd0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F1Cd0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F1Dd0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F1Ed0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F1Fd0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F10d0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F11d0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F12d0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F13d0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F14d0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F15d0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F16d0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F17d0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F18d0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F19d0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F20d0: configured with capacity of 25.28GB
c4t60060E8004F25E000000F25E00000F21d0: configured with capacity of 25.28GB


AVAILABLE DISK SELECTIONS:
root@hostname # more disktolabel.out
c4t60060E8004F25E000000F25E00000F0Ed0: configured with capacity of 25.28GB
38. c4t60060E8004F25E000000F25E00000F0Ed0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F0Fd0: configured with capacity of 25.28GB
39. c4t60060E8004F25E000000F25E00000F0Fd0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F1Ad0: configured with capacity of 25.28GB
40. c4t60060E8004F25E000000F25E00000F1Ad0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F1Bd0: configured with capacity of 25.28GB
41. c4t60060E8004F25E000000F25E00000F1Bd0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F1Cd0: configured with capacity of 25.28GB
42. c4t60060E8004F25E000000F25E00000F1Cd0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F1Dd0: configured with capacity of 25.28GB
43. c4t60060E8004F25E000000F25E00000F1Dd0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F1Ed0: configured with capacity of 25.28GB
44. c4t60060E8004F25E000000F25E00000F1Ed0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F1Fd0: configured with capacity of 25.28GB
45. c4t60060E8004F25E000000F25E00000F1Fd0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F10d0: configured with capacity of 25.28GB
46. c4t60060E8004F25E000000F25E00000F10d0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F11d0: configured with capacity of 25.28GB
47. c4t60060E8004F25E000000F25E00000F11d0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F12d0: configured with capacity of 25.28GB
48. c4t60060E8004F25E000000F25E00000F12d0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F13d0: configured with capacity of 25.28GB
49. c4t60060E8004F25E000000F25E00000F13d0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F14d0: configured with capacity of 25.28GB
50. c4t60060E8004F25E000000F25E00000F14d0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F15d0: configured with capacity of 25.28GB
51. c4t60060E8004F25E000000F25E00000F15d0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F16d0: configured with capacity of 25.28GB
52. c4t60060E8004F25E000000F25E00000F16d0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F17d0: configured with capacity of 25.28GB
53. c4t60060E8004F25E000000F25E00000F17d0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F18d0: configured with capacity of 25.28GB
54. c4t60060E8004F25E000000F25E00000F18d0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F19d0: configured with capacity of 25.28GB
55. c4t60060E8004F25E000000F25E00000F19d0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F20d0: configured with capacity of 25.28GB
56. c4t60060E8004F25E000000F25E00000F20d0 2 hd 15 sec 512>
c4t60060E8004F25E000000F25E00000F21d0: configured with capacity of 25.28GB
57. c4t60060E8004F25E000000F25E00000F21d0 2 hd 15 sec 512>
root@hostname #

---- The steps above are done. Next, create the new concat metadevice for /ftp/data2. ----
root@hostname # more checkifdiskisused.sh
#! /bin/sh
# Check whether any of the new devices (names listed in a.out, one per line)
# already appear in an existing metadevice (metastat -p output).
for i in `cat a.out`
do
  grep -i "$i" metastat_p.out
done
root@hostname # sh checkifdiskisused.sh
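Here a.out is assumed to hold the new c4t... device names, one per line (no output from the script means none of them is already in use). A minimal, hedged way to build a.out from the format output captured above would be:

grep "configured with capacity" format.today | awk -F: '{print $1}' > a.out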
root@hostname # more metastat_p.out
d103 -m d203 d303 1
d203 1 1 c0t0d0s4
d303 1 1 c0t1d0s4
d102 -m d202 d302 1
d202 1 1 c0t0d0s3
d302 1 1 c0t1d0s3
d101 -m d201 d301 1
d201 1 1 c0t0d0s1
d301 1 1 c0t1d0s1
d100 -m d200 d300 1
d200 1 1 c0t0d0s0
d300 1 1 c0t1d0s0
d104 3 1 /dev/dsk/c4t60060E8004F25E000000F25E00000714d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000715d0s1 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000717d0s0
d107 40 1 /dev/dsk/c4t60060E8004F25E000000F25E00000DD4d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DD5d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DD6d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DD7d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DD8d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DD9d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DDAd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DDBd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DDCd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DDDd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DDEd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DDFd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DE0d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DE1d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DE2d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DE3d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DE4d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DE5d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DE6d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000DE7d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000718d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000719d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00001130d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00001131d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000D6d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000D7d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000D8d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000D9d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000DAd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000DBd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000DCd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000DDd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000DEd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000DFd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000E0d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000E1d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000E2d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000E3d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000E4d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E000000E5d0s0
d112 1 1 /dev/dsk/c4t60060E8004F25E000000F25E00000716d0s3
d109 2 1 /dev/dsk/c4t60060E8004F25E000000F25E00000715d0s3 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000715d0s4
d110 1 1 /dev/dsk/c4t60060E8004F25E000000F25E00000716d0s0
d106 1 1 /dev/dsk/c4t60060E8004F25E000000F25E00000714d0s3
d105 1 1 /dev/dsk/c4t60060E8004F25E000000F25E00000714d0s1
d111 1 1 /dev/dsk/c4t60060E8004F25E000000F25E00000716d0s1
d108 1 1 /dev/dsk/c4t60060E8004F25E000000F25E00000715d0s0
root@hostname #
root@hostname # metastat -p | grep d113
root@hostname #

metainit d113 1 1 /dev/dsk/c4t60060E8004F25E000000F25E00000F0Ed0s0

-----

metattach d113 c4t60060E8004F25E000000F25E00000F0Fd0s0
metattach d113 c4t60060E8004F25E000000F25E00000F1Ad0s0
metattach d113 c4t60060E8004F25E000000F25E00000F1Bd0s0
metattach d113 c4t60060E8004F25E000000F25E00000F1Cd0s0
metattach d113 c4t60060E8004F25E000000F25E00000F1Dd0s0
metattach d113 c4t60060E8004F25E000000F25E00000F1Ed0s0
metattach d113 c4t60060E8004F25E000000F25E00000F1Fd0s0
metattach d113 c4t60060E8004F25E000000F25E00000F10d0s0
metattach d113 c4t60060E8004F25E000000F25E00000F11d0s0
metattach d113 c4t60060E8004F25E000000F25E00000F12d0s0
metattach d113 c4t60060E8004F25E000000F25E00000F13d0s0
metattach d113 c4t60060E8004F25E000000F25E00000F14d0s0
metattach d113 c4t60060E8004F25E000000F25E00000F15d0s0
metattach d113 c4t60060E8004F25E000000F25E00000F16d0s0
metattach d113 c4t60060E8004F25E000000F25E00000F17d0s0
metattach d113 c4t60060E8004F25E000000F25E00000F18d0s0
metattach d113 c4t60060E8004F25E000000F25E00000F19d0s0
metattach d113 c4t60060E8004F25E000000F25E00000F20d0s0
metattach d113 c4t60060E8004F25E000000F25E00000F21d0s0
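The same sequence can be scripted; a hedged sketch, assuming disklist.txt holds the remaining c4t...d0 device names, one per line:

#! /bin/sh
# attach slice 0 of each listed device to the d113 concat
for d in `cat disklist.txt`
do
  metattach d113 ${d}s0
done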


+ metastat -p d113
d113 20 1 /dev/dsk/c4t60060E8004F25E000000F25E00000F0Ed0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F0Fd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F1Ad0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F1Bd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F1Cd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F1Dd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F1Ed0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F1Fd0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F10d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F11d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F12d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F13d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F14d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F15d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F16d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F17d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F18d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F19d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F20d0s0 \
1 /dev/dsk/c4t60060E8004F25E000000F25E00000F21d0s0
root@hostname # newfs -Tv /dev/md/rdsk/d113
newfs: construct a new file system /dev/md/rdsk/d113: (y/n)? y
mkfs -F ufs /dev/md/rdsk/d113 1060308480 512 15 8192 8192 -1 1 90 1048576 t 0 -1 8 16 y
/dev/md/rdsk/d113: 1060308480 sectors in 138061 cylinders of 15 tracks, 512 sectors
517728.8MB in 1424 cyl groups (97 c/g, 363.75MB/g, 384 i/g)
super-block backups (for fsck -F ufs -o b=#) at:
32, 745504, 1490976, 2236448, 2981920, 3727392, 4472864, 5218336, 5963808,
6709280,
Initializing cylinder groups:
............................
super-block backups for last 10 cylinder groups at:
1053376544, 1054122016, 1054867488, 1055612960, 1056358432, 1057103904,
1057849376, 1058594848, 1059340320, 1060085792
root@hostname #

Saturday, December 13, 2008

Solaris Moving Directories to Other Location

Sometimes tar fails with a "file too large" error when moving big directory trees. Use cpio instead for the same result:
% cd /target-directory

% find . -depth -print | cpio -pudmv /dest-directory


example

#! /bin/sh
# Pass-mode copy: -u overwrite unconditionally, -d create directories as needed,
# -m preserve modification times, -v verbose.
cd /ftp/data/pdu-msa/EDA-SLS || exit 1
find . -depth -print | cpio -pudmv /ftp/data2/pdu-msa/EDA-SLS
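A quick sanity check after the copy, assuming both trees are quiescent during the move:

# sizes (in KB) should be roughly equal; exact numbers can differ across filesystems
du -sk /ftp/data/pdu-msa/EDA-SLS /ftp/data2/pdu-msa/EDA-SLS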

Friday, November 7, 2008

Sol10: How to mount lofs from localzone

Sol10: How to mount lofs from localzone
---------------------------------------
On global zone, mounted file system looks like this

/dev/md/dsk/d113 530063064 65560 524696880 1% /zones/myzone/ftp/data2


Run the following zonecfg commands:
zonecfg -z myzone
> add fs
> set dir=/ftp/data2
> set special=/zones/myzone/ftp/data2
> set type=lofs
> end
> verify
> commit
> exit

Run the command below to confirm:
#zonecfg -z myzone info
....
fs:
dir: /ftp/data2
special: /zones/myzone/ftp/data2
raw not specified
type: lofs
options: []


....

root@myzone # mount -F lofs /ftp/data2 /ftp/data2

root@myzone # df -k /ftp/data2
Filesystem kbytes used avail capacity Mounted on
/ftp/data2 530063064 65560 524696880 1% /ftp/data2

Sunday, September 21, 2008

Solaris 10 How to stop start automountd

# svcs | grep auto
legacy_run Oct_24 lrc:/etc/rc2_d/S72autoinstall
online 17:31:10 svc:/system/filesystem/autofs:default
To stop
# svcadm -v disable svc:/system/filesystem/autofs:default
To start
# svcadm -v enable svc:/system/filesystem/autofs:default
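To confirm the resulting service state:
# svcs -l svc:/system/filesystem/autofs:default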

Friday, September 19, 2008

HPUX GSP toggle between console and command mode

CO : COnsole- leave command mode and return to console mode
This command exits the GSP command interface, and connects to the system console. All mirrored users are switched. Type CTRL-B to return to the GSP command interface.

HPUX GSP Console get write access

Typing "CO" from the GSP Command interface provides a mirrored version of the OS console. All mirrored users see the same output. At any time, only one of the mirrored users has write access to the console. To get write access to the console, type CTRL-e c f (not CTRL-e CTRL-c CTRL-f).

SEE ALSO: CO (COnsole)

Solaris when LUN dynamically removed JNI

When the driver dynamically removes a LUN, not all references to that
LUN are removed. This is because the file system nodes in the
/dev/dsk and /dev/rdsk directories are scanned to show the devices.
However, when devices are removed dynamically via EZ Fibre or the
jnic146x_update_drv command, only the kernel device nodes are
removed. To remove the actual file system nodes, execute the
following command:

#devfsadm -C -v
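To see the effect, you can count the /dev/dsk links for the affected controller before and after the cleanup (the c2 controller number here is only an example):

# ls /dev/dsk | grep -c "^c2t"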

Solaris verify LUN disk size

Verify size of LUN.
# prtvtoc /dev/rdsk/c#t#d#s2 > vtoc.lun.out
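A hedged one-liner to turn that vtoc into a size, assuming an SMI label and 512-byte sectors (the backup slice s2 covers the whole LUN):

# report the size of slice 2 in GB from the saved vtoc
awk '$1 == 2 { printf "%.2f GB\n", $5 * 512 / 1024 / 1024 / 1024 }' vtoc.lun.out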

Solaris save format output to file

# echo|format > format.out

This is useful when you add a new disk/LUN and want a quick way to see which one is new.
It also makes it easy to build commands like the one below:

#metattach d017 /dev/dsk/c6t60060E8004F25E000000F25E000009E1d0
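One hedged way to make the comparison explicit, assuming you kept a copy of the output taken before the new LUN was presented (file names are hypothetical):

# echo | format > format.out.new
# diff format.out.old format.out.new | grep "^>"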


root@hostorap1 # grep -i warning /var/adm/messages
Sep 18 10:07:28 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e00000623 (ssd34):
Sep 18 10:07:28 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e00000622 (ssd35):
Sep 18 10:07:28 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e00000621 (ssd36):
Sep 18 10:07:28 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e00000620 (ssd37):
Sep 18 10:07:28 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e0000061f (ssd38):
Sep 18 10:07:28 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e0000061e (ssd39):
Sep 18 10:07:29 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e0000061d (ssd40):
Sep 18 10:07:29 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e0000061c (ssd41):
Sep 18 10:07:29 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e0000061b (ssd42):
Sep 18 10:07:29 hostorap1 scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e0000061a (ssd43):
Sep 18 10:07:29 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e00000619 (ssd44):
Sep 18 10:07:29 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e00000618
root@host # more format.out
Searching for disks...done

c6t60060E8004F25E000000F25E000009E1d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E000009E2d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E000009E3d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E000009E4d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E0000061Ad0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E0000061Bd0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E0000061Cd0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E0000061Dd0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E0000061Ed0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E0000061Fd0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E00000618d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E00000619d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E00000620d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E00000621d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E00000622d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E00000623d0: configured with capacity of 25.28GB


AVAILABLE DISK SELECTIONS:
0. c0t0d0
/ssm@0,0/pci@18,600000/pci@2/scsi@2/sd@0,0
1. c1t0d0
/ssm@0,0/pci@1c,600000/pci@2/scsi@2,1/sd@0,0
2. c6t60060E8004F25E000000F25E00000DE8d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000de8
3. c6t60060E8004F25E000000F25E00000DE9d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000de9
4. c6t60060E8004F25E000000F25E00000DEAd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000dea
5. c6t60060E8004F25E000000F25E00000DEBd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000deb
6. c6t60060E8004F25E000000F25E00000EA0d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea0
7. c6t60060E8004F25E000000F25E00000EA1d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea1
8. c6t60060E8004F25E000000F25E00000EA2d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea2
9. c6t60060E8004F25E000000F25E00000EA3d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea3
10. c6t60060E8004F25E000000F25E00000EA4d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea4
11. c6t60060E8004F25E000000F25E00000EA5d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea5
12. c6t60060E8004F25E000000F25E00000EA6d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea6
13. c6t60060E8004F25E000000F25E00000EA7d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea7
14. c6t60060E8004F25E000000F25E000004A4d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000004a4
15. c6t60060E8004F25E000000F25E000004A5d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000004a5
16. c6t60060E8004F25E000000F25E000004A6d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000004a6
17. c6t60060E8004F25E000000F25E000004A7d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000004a7
18. c6t60060E8004F25E000000F25E000009E1d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000009e1
19. c6t60060E8004F25E000000F25E000009E2d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000009e2
20. c6t60060E8004F25E000000F25E000009E3d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000009e3
21. c6t60060E8004F25E000000F25E000009E4d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000009e4
22. c6t60060E8004F25E000000F25E0000037Ad0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000037a
23. c6t60060E8004F25E000000F25E0000037Bd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000037b
24. c6t60060E8004F25E000000F25E0000037Cd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000037c
25. c6t60060E8004F25E000000F25E0000037Dd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000037d
26. c6t60060E8004F25E000000F25E0000037Ed0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000037e
27. c6t60060E8004F25E000000F25E0000037Fd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000037f
28. c6t60060E8004F25E000000F25E0000061Ad0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000061a
29. c6t60060E8004F25E000000F25E0000061Bd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000061b
30. c6t60060E8004F25E000000F25E0000061Cd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000061c
31. c6t60060E8004F25E000000F25E0000061Dd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000061d
32. c6t60060E8004F25E000000F25E0000061Ed0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000061e
33. c6t60060E8004F25E000000F25E0000061Fd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000061f
34. c6t60060E8004F25E000000F25E0000097Ad0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000097a
35. c6t60060E8004F25E000000F25E0000097Bd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000097b
36. c6t60060E8004F25E000000F25E0000097Cd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000097c
37. c6t60060E8004F25E000000F25E0000097Dd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000097d
38. c6t60060E8004F25E000000F25E0000097Ed0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000097e
39. c6t60060E8004F25E000000F25E00000378d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000378
40. c6t60060E8004F25E000000F25E00000379d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000379
41. c6t60060E8004F25E000000F25E00000380d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000380
42. c6t60060E8004F25E000000F25E00000381d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000381
43. c6t60060E8004F25E000000F25E00000382d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000382
44. c6t60060E8004F25E000000F25E00000383d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000383
45. c6t60060E8004F25E000000F25E00000618d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000618
46. c6t60060E8004F25E000000F25E00000619d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000619
47. c6t60060E8004F25E000000F25E00000620d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000620
48. c6t60060E8004F25E000000F25E00000621d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000621
49. c6t60060E8004F25E000000F25E00000622d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000622
50. c6t60060E8004F25E000000F25E00000623d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000623
51. c6t60060E8004F25E000000F25E00000979d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000979
Specify disk (enter its number):
root@host #

Solaris replicate copy disk structure partition

Make sure the disks have the same geometry.
Useful when you add a new disk and want to avoid repartitioning it by hand.

# prtvtoc /dev/rdsk/c?t?d?s2 | fmthard -s - /dev/rdsk/c?t?d?s2
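For example, with hypothetical device names, to copy the label from c0t0d0 (source) to c0t1d0 (a new disk with identical geometry):

# prtvtoc /dev/rdsk/c0t0d0s2 | fmthard -s - /dev/rdsk/c0t1d0s2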

Thursday, September 18, 2008

HPUX 9000/800/rp3440 how to power off and on

1- Login to the OS as root and issue #init 0
2- Login to GSP

MP MAIN MENU:

CO: Console
VFP: Virtual Front Panel
CM: Command Menu
CL: Console Log
SL: Show Event Logs
HE: Main Help Menu
X: Exit Connection

[Server-rib] MP> CM
[Server-rib] MP:CM> pc -off


PC -off


System will be powered off.

You must shut down the OS manually before this command is executed.
Failure to do this can cause problems when the OS is restarted.
Confirm? (Y/[N]): y
y

-> System is being powered off.

-> Command successful.

[Server-rib] MP:CM> PC -on

Usage: PC [ -on | -off | -cycle ] [ -nc ]
PC -?

Wednesday, September 17, 2008

Solaris find out what type of lun hardware and command to check it out


HDS #lunstat
format output
10. c4t98d6 DGC-RAID5-0219 cyl 40958 alt 2 hd 128 sec 10
/pci@9,600000/lpfc@1/sd@62,6

HP XP #xpinfo
format output
10. c2t4d10 HP-OPEN-E-SUN-2108 cyl 19757 alt 2 hd 15 sec 96
/pci@3,2000/fibre-channel@2/sd@4,a
To detect new luns
#cfgadm -al -o show_SCSI_LUN
#devfsadm
Check /var/adm/messages for "Corrupt label" entries; those are the new disks/LUNs.
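For example:
#grep -i "corrupt label" /var/adm/messages | tail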

JNI

configuration files

/kernel/drv/
  • jnic146x.conf
  • jnic.conf
  • fcaw.conf
  • fca-pci.conf

FCode diag commands (from OBP)

  • fce-test - test the Emerald chip
  • loop-init - verifies loop
  • fcd-loop - verifies loop on a dual-port adapter
  • prt-cfg - configure the adapter's port
  • set-bootn-wwn - set the WWN of the boot disk

How to verify firmware level (from Solaris)

#prtconf -cp |grep fcode

How to verify presence of adapters in os

#prtconf -v |grep -i JNI
#prtconf -v |grep -i FCA
#modinfo | grep -i jni


---------------------------------------------------------------------------------------

Storage Systems

* Hitachi USP V, USP, NSC, AMS, WMS Series
* Hitachi Lightning™ 9900 and 9900 V Series
* Hitachi Thunder™ 9200 and 9500 V Series
* EMC Symmetrix™ 4.8, 5.0, 5.5
* EMC DMX 800, 1000, 2000, 3000
* EMC CLARiiON™ CX-Series, FC-4700
* HP XP Series 48, 128, 512, 1024, 10000, 12000
* HP EVA 3000, 4000, 5000, 6000, 8000
* HP MSA 1000, 1500
* HP HSG
* HP NAS, All-in-One Storage NAS
* HP EFS Clustered NAS Gateway
* HP ESL, EML tape libraries
* IBM DS4000 Series (Formerly FAStT Series)
* IBM DS4500, DS4800
* IBM ESS 800, F20
* IBM DS6000, DS6800, DS8000, DS8100, DS8300
* IBM 3581, 3582, 3583, and 3584 tape libraries
* LSI Engenio E2600, E4600, E5600, E6000
* NetApp FAS, V-Series, NearStore with Data ONTAP 6.5-7.1
* 3PAR InServ
* SGI InfiniteStorage TP Series
* Sun StorageTek 9900 , 9985, 9990
* Sun StorageTek 3510, 3511, 6130, 6140, 6540, 6920, 6940
* Sun StorageTek NAS 5210, 5310, 5320
* Sun (Formerly StorageTek) Flexline™ 200/300 Series
* Xiotech Magnitude 3D


Host Bus Adapters (HBAs)

Various HBA series and models from each of the following HBA vendors:

* Emulex
* HP
* IBM
* AMCC (Formerly JNI)
* LSI
* Qlogic
* Sun

Solaris 10 patching zones

How to Apply a Patch to the Global Zone Only
global# patchadd -G patch_id


How to Apply a Patch to the Global Zone and All Non-Global Zones
global# patchadd patch_id

How to Apply a Patch to a Specified Non-Global Zone Only
To apply a patch to a specified non-global zone only,
the SUNW_PKG_ALLZONES package parameter for all packages
in the patch set must be set to false.

local-zone# patchadd patch_id
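To check that parameter for a package delivered by the patch (the package name here is only an example):

global# pkgparam -v SUNWcsr SUNW_PKG_ALLZONES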

How to Remove a Patch From the Global Zone and All Non-Global Zones
global# patchrm patch_id

How to Remove a Patch From a Specified Non-Global Zone Only
To remove a patch from a specified non-global zone only,
the SUNW_PKG_ALLZONES package parameter for all packages
in the patch set must be set to false.

my-zone# patchrm patch_id

Solaris 10 How to Add a Package

How to Add a Package to a Specified Non-Global Zone Only

local-zone# pkgadd -d /dir package_name


How to Add a Package to the Global Zone and All Non-Global Zones


global# pkgadd -d /dir package_name


How to Add a Package to the Global Zone Only

global# pkgadd -d /dir -G package_name

Solaris 10 modify the network configuration of a running zone

To add:
global# ifconfig bge0 addif 192.168.200.202 zone myzone
To remove:
global# ifconfig bge0 removeif 192.168.200.202
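The ifconfig change above lasts only until the zone reboots. A hedged sketch to make it persistent with zonecfg (same address and interface as the example above):
global# zonecfg -z myzone
> add net
> set address=192.168.200.202
> set physical=bge0
> end
> commit
> exit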

Tuesday, September 16, 2008

Solaris 10 login to zone console

login to global zone

# zoneadm list -vc
ID NAME STATUS PATH
0 global running /
1 zone1 running /zone/1


# zlogin -C -e\@ zone1
[Connected to zone 'zone1' console]

zone1 console login:

zone1 console login:

zone1 console login: @.
[Connection to zone 'zone1' console closed]
#

VXVM: determine current booted disk

% prtconf -vp | grep bootpath

VXVM: quickly mirroring an empty volume

Use the command below to create an empty mirrored volume:

# vxassist make newvol 10m layout=concat-mirror init=active disk1 disk2
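To confirm the resulting layout (the disk group name mydg is an assumption; use the group that disk1 and disk2 belong to):

# vxprint -g mydg -ht newvol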

Friday, September 12, 2008

Solaris v440 replace failed power supply

Need to login to System Controller
sc>
SC Alert: PSU @ PS1 has FAILED.

sc> removefru PS1
Are you sure you want to remove PS1 [y/n]? y
sc>
SC Alert: PSU @ PS1 has FAILED.

SC Alert: PSU @ PS1 has FAILED.

SC Alert: PSU @ PS1 has FAILED.

SC Alert: PSU @ PS1 has been removed.

SC Alert: Required PSU @ PS1 is not present.

SC Alert: PSU @ PS1 has been inserted.

sc> poweron PS1
sc>

Monday, September 8, 2008

HPUX How to check if patch have been installed and install the patch

Let's say we want to check for these two patches:
PHKL_36745 and PHNE_37489

# swlist -l patch | grep -i phkl_36745
PHKL_36745.LVM-KRN 1.0 LVM.LVM-KRN applied
PHKL_36745.CORE2-KRN 1.0 OS-Core.CORE2-KRN applied
# PHKL_36745 1.0 LVM Cumulative Patch
# PHKL_36745.CORE2-KRN 1.0 OS-Core.CORE2-KRN applied
# PHKL_36745.LVM-KRN 1.0 LVM.LVM-KRN applied
# swlist -l patch | grep -i phne_37489
#

This shows that patch phkl_36745 is installed and phne_37489 is not installed yet

1. Back up your system before installing a patch.

2. Login as root.

3. Download and Copy the patch to the /tmp directory.

4. Move to the /tmp directory and unshar the patch:

cd /tmp
sh PHNE_37489

5. Run swinstall to install the patch:

swinstall -x autoreboot=true -x patch_match_target=true \
-s /tmp/PHNE_37489.depot

By default swinstall will archive the original software in
/var/adm/sw/save/PHNE_37489. If you do not wish to retain a
copy of the original software, include the patch_save_files
option in the swinstall command above:

-x patch_save_files=false

WARNING: If patch_save_files is false when a patch is installed,
the patch cannot be deinstalled. Please be careful
when using this feature.

For future reference, the contents of the PHNE_37489.text file are
available in the product readme:

swlist -l product -a readme -d @ /tmp/PHNE_37489.depot

To put this patch on a magnetic tape and install from the
tape drive, use the command:

dd if=/tmp/PHNE_37489.depot of=/dev/rmt/0m bs=2k

Friday, September 5, 2008

HPUX check network interface ip and status

# lanscan
Hardware Station Crd Hdw Net-Interface NM MAC HP-DLPI DLPI
Path Address In# State NamePPA ID Type Support Mjr#
0/1/2/0 0x00156004DF56 0 UP lan0 snap0 1 ETHER Yes 119
VLAN5001 0x00156004DF56 5001 UP lan5001 snap5001 7 ETHER Yes 119
VLAN5000 0x00156004DF56 5000 UP lan5000 snap5000 6 ETHER Yes 119
0/1/2/1 0x00156004DF57 1 UP lan1 snap1 2 ETHER Yes 119
0/5/1/0 0x00163584E02A 2 UP lan2 snap2 3 ETHER Yes 119
#
# ifconfig lan0
ifconfig: no such interface
# ifconfig lan2
ifconfig: no such interface
# netstat -i
Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
lan5001 1500 172.30.225.0 mama201-nb 2531362 0 18691869 0 0
lan5000 1500 142.133.20.0 mama201 1388313474 0 1440717679 0 0
lo0 4136 loopback localhost 30234682 0 30234690 0 0
# ifconfig lan5001
lan5001: flags=4000000000001843
inet 172.30.225.26 netmask ffffff00 broadcast 172.30.225.255
#

Sudoers Example: su - root

As root
#visudo
Add the lines below, where user123 is a user ID that needs "su -" privileges:
user123 ALL=(ALL) /usr/bin/su -
user124 ALL=(ALL) /usr/bin/su -
user125 ALL=(ALL) /usr/bin/su -
user126 ALL=(ALL) /usr/bin/su -
:wq!

as user123
user123% sudo su -
We trust you have received the usual lecture from the local System
Administrator. It usually boils down to these two things:

#1) Respect the privacy of others.
#2) Think before you type.

Password:

Authorized access only!

root#

Sunday, August 31, 2008

Solaris VXVM grow online filesystem

root# vxdisk list
DEVICE TYPE DISK GROUP STATUS
c0t0d0s2 sliced rootdisk rootdg online
c0t1d0s2 sliced x50008 x500 online
c0t2d0s2 sliced x50007 x500 online
c0t3d0s2 sliced x50004 x500 online
c2t0d0s2 sliced disk01 rootdg online
c2t1d0s2 sliced x50002 x500 online
c2t2d0s2 sliced x50006 x500 online spare
c2t3d0s2 sliced x50003 x500 online
c3t0d0s2 sliced x50009 x500 online
c3t1d0s2 sliced x50001 x500 online
c3t3d0s2 sliced x50005 x500 online
root# vxassist -g x500 maxsize
Maximum volume size: 156450816 (76392Mb)

root# df -k /opt/app/backup
Filesystem kbytes used avail capacity Mounted on
/dev/vx/dsk/x500/app-back
9654053 9149133 473813 96% /opt/app/backup
root#
root# ./vxresize -F vxfs -g x500 app-back +3g
root# df -k /opt/app/backup
Filesystem kbytes used avail capacity Mounted on
/dev/vx/dsk/x500/app-back
12799781 9149903 3422212 73% /opt/app/backup
root#

Wednesday, July 30, 2008

Solaris sar %sys cpu high

How to find out which PID is consuming %sys (kernel) CPU resources:
#prstat -m
eg.
$ prstat -m
PID USERNAME USR SYS TRP TFL DFL LCK SLP LAT VCX ICX SCL SIG PROCESS/NLWP
3828 hpfimhas 24 75 1.1 0.0 0.0 0.0 0.0 0.0 0 0 87K 0 prstat/1
3812 root 0.1 0.1 0.0 0.0 0.0 0.0 100 0.0 9 0 224 0 sshd/1
3820 hpfimhas 0.0 0.0 0.0 0.0 0.0 0.0 100 0.0 38 0 128 0 sshd/1
3822 hpfimhas 0.0 0.0 0.0 0.0 0.0 0.0 100 0.0 3 0 110 0 sh/1
6105 instbas 0.0 0.0 0.0 0.0 0.0 73 27 0.0 10 0 14 0 java/48
6006 lmcbejo 0.0 0.0 0.0 0.0 0.0 87 13 0.0 23 1 16 0 java/30
13769 instbas 0.0 0.0 0.0 0.0 0.0 91 9.4 0.0 4 0 9 0 java/128
5974 instbas 0.0 0.0 0.0 0.0 0.0 84 16 0.0 16 0 10 0 java/57

For solaris 10 can use dtrace, see example below

On with the show: let's say you're looking at mpstat(1) output on your
multiuser server. You might see something like this:

CPU minf mjf xcal intr ithr csw icsw migr smtx srw syscl usr sys wt idl
12 1 27 3504 338 206 765 27 114 65 1 337 9 19 41 31
13 1 19 5725 98 68 723 22 108 120 0 692 3 17 20 61
14 0 57 3873 224 192 670 22 75 86 1 805 7 10 35 48
15 36 7 1551 42 28 689 6 68 59 0 132 2 9 35 54
16 14 7 7209 504 457 1031 37 125 244 0 459 6 30 4 60
17 5 5 4960 150 108 817 37 98 154 0 375 6 26 6 62
18 5 6 6085 1687 1661 741 60 76 248 0 434 3 33 0 64
19 0 15 10037 72 41 876 23 100 291 1 454 2 19 9 71
20 12 5 5890 746 711 992 32 122 216 2 960 10 33 4 53
21 60 5 1567 729 713 467 15 80 59 0 376 2 35 10 53
22 0 6 4378 315 291 751 17 84 142 1 312 3 16 1 80
23 0 6 12119 33 3 874 20 82 384 1 513 4 24 11 62

And well you may wonder (as perhaps you often have) -- what the hell is
causing all of those cross calls, anyway? (Cross calls appear in the "xcal"
column; see mpstat(1).)

Using DTrace, investigating this is a snap:

# dtrace -n xcalls'{@[execname] = count()}'
dtrace: description 'xcalls' matched 4 probes
[ letting this run for a few seconds ]
^C

mozilla-bin 1
lockd 1
in.mpathd 2
nsrmmd 5
grep 6
chmod 6
cat 6
nwadmin 13
ls 24
in.tftpd 28
nsrindexd 34
fsflush 38
cut 42
find 42
mkdir 66
rm 76
ipop3d 78
scp 79
inetd 96
dtrace 111
nawk 118
imapd-simmonmt 126
rmdir 132
sshd 138
rpc.rstatd 159
mv 398
save 1292
gzip 1315
get_all2 1678
sched 1712
nfsd 3709
tar 27054 <----- high usage

Wednesday, July 9, 2008

Solaris 10: mount a disk slice in a sub-zone

Non-global zones in Solaris 10 do not have the ability to see drives or disk slices (to prove this, list the contents of /dev/dsk from within a zone). What filesystems are mounted in which zones is controlled exclusively from the global zone. This recipe describes persistently mounting a disk slice in a non-global zone.
To mount the device c0t1d0s3 in the existing zone testzone under the mount point /mnt, log in to the global zone, become root or a privileged user, and complete the following steps:



zonecfg -z testzone
zonecfg:testzone> add fs
zonecfg:testzone:fs> set dir=/mnt
zonecfg:testzone:fs> set special=/dev/dsk/c0t1d0s3
zonecfg:testzone:fs> set raw=/dev/rdsk/c0t1d0s3
zonecfg:testzone:fs> set type=ufs
zonecfg:testzone:fs> end
zonecfg:testzone> verify
zonecfg:testzone> commit
zonecfg:testzone> exit

After the commit, the filesystem will be mounted in the zone the next time the zone boots. Substitute the desired mount point and device name for your system. If the zone has not yet been created, you can use the 'add fs' commands when creating the zone.

Solaris SVM grow concat/stripe

How to grow a filesystem under DiskSuite (without soft partitions) for a concat/stripe metadevice.
First check that the new disk is unused: #metastat | grep -i 2f5 (grep for the LUN's suffix; no output means it is not part of any metadevice).
Then use the format command to repartition the disk:
[05:58:15] root@server[31]# format
Searching for disks...done


AVAILABLE DISK SELECTIONS:
0. c0t0d0
/pci@780/pci@0/pci@9/scsi@0/sd@0,0
1. c0t1d0
/pci@780/pci@0/pci@9/scsi@0/sd@1,0
2. c4t60060E800543BE00000043BE000000BBd0
/scsi_vhci/ssd@g60060e800543be00000043be000000bb
3. c4t60060E800543BE00000043BE000000BCd0
/scsi_vhci/ssd@g60060e800543be00000043be000000bc
4. c4t60060E800543BE00000043BE000000BDd0
/scsi_vhci/ssd@g60060e800543be00000043be000000bd
5. c4t60060E800543BE00000043BE000000BEd0
/scsi_vhci/ssd@g60060e800543be00000043be000000be
6. c4t60060E800543BE00000043BE000002F4d0
/scsi_vhci/ssd@g60060e800543be00000043be000002f4
7. c4t60060E800543BE00000043BE000002F5d0
/scsi_vhci/ssd@g60060e800543be00000043be000002f5
Specify disk (enter its number):
[05:58:15] root@server[32]# metattach d107 /dev/dsk/c4t60060E800543BE00000043BE000002F5d0s0
d107: component is attached
[05:58:15] root@server[33]# df -k /dbo
Filesystem kbytes used avail capacity Mounted on
/dev/md/dsk/d107 183417068 173603462 8489617 96% /dbo
[05:58:15] root@server[34]# grep dbo /etc/mnttab
/dev/md/dsk/d107 /dbo ufs rw,intr,largefiles,logging,xattr,onerror=panic,dev=154006b 1202293409
[05:58:15] root@server[35]# growfs -M /dbo /dev/md/rdsk/d107
/dev/md/rdsk/d107: 476067840 sectors in 77485 cylinders of 48 tracks, 128 sectors
232455.0MB in 4843 cyl groups (16 c/g, 48.00MB/g, 5824 i/g)
super-block backups (for fsck -F ufs -o b=#) at:
32, 98464, 196896, 295328, 393760, 492192, 590624, 689056, 787488, 885920,
Initializing cylinder groups:
...............................................................................
.................
super-block backups for last 10 cylinder groups at:
475107488, 475205920, 475304352, 475402784, 475501216, 475599648, 475698080,
475796512, 475894944, 475993376
[05:58:15] root@server[36]# df -k /dbo
Filesystem kbytes used avail capacity Mounted on
/dev/md/dsk/d107 234430636 173603462 59503185 75% /dbo
[05:58:15] root@server[37]#

Friday, May 30, 2008

Solaris Netapps add new filesystem

The following volume options should be configured:
  nosnap - no automatic scheduled Snapshot™ duplications
  minra  - minimal read ahead
  nvfail - NVRAM check and behavior
sapfiler1> vol options sapdata nosnap on
sapfiler1> vol options sapdata minra on
sapfiler1> vol options sapdata nvfail on
sapfiler1> vol options saplog nosnap on
sapfiler1> vol options saplog minra on
sapfiler1> vol options saplog nvfail on
sapfiler1> qtree create /vol/sapdata/sapdata_lun
sapfiler1> qtree create /vol/saplog/saplog_lun
sapfiler1> qtree create /vol/saplog/sapmnt_lun
sapfiler1> qtree create /vol/saplog/sapusr_lun
sapfiler1> qtree create /vol/saplog/trans_lun
Create the LUNs
sapfiler1> lun create -s 30g -t solaris /vol/sapdata/sapdata_lun/sapdata
sapfiler1> lun create -s 5g -t solaris /vol/saplog/saplog_lun/saplog
sapfiler1> lun create -s 500m -t solaris /vol/saplog/sapmnt_lun/sapmnt
sapfiler1> lun create -s 500m -t solaris /vol/saplog/sapusr_lun/sapusr
sapfiler1> lun create -s 200m -t solaris /vol/saplog/trans_lun/trans
Define the initiator group
To create the initiator group you need to know the WWPN of the host, which will use these LUNs. The
WWPN can be obtained with the sanlun command on the host.
bash-2.03# sanlun fcp show adapter
lpfc0 WWPN:10000000c92d55f3
sapfiler1> igroup create -f -t solaris pp400 10:00:00:00:c9:2d:55:f3
Map the LUNs to the initiator group
sapfiler1> lun map /vol/sapdata/sapdata_lun/sapdata pp400 0
sapfiler1> lun map /vol/saplog/saplog_lun/saplog pp400 6
sapfiler1> lun map /vol/saplog/sapusr_lun/sapusr pp400 7
sapfiler1> lun map /vol/saplog/sapmnt_lun/sapmnt pp400 8
sapfiler1> lun map /vol/saplog/trans_lun/trans pp400 9

Configure persistent binding
Persistent binding is configured with the tool /usr/sbin/lpfc/lputil . To configure persistent
binding you need to know the WWNN of the filer. The WWNN can be obtained with the sysconfig
command at the filer console.
sapfiler1> sysconfig -v
…………
…………
slot 3: Fibre Channel Target Host Adapter 3a
(Dual-channel, QLogic 2312 (2342) rev. 2, 64-bit, )
Firmware rev: 3.1.15
Host Port Addr: 011000
Cacheline size: 8
SRAM parity: Yes
FC Nodename: 50:a9:80:00:02:00:88:f7 (50a98000020088f7)
FC Portname: 50:a9:80:03:02:00:88:f7 (50a98003020088f7)
Connection: PTP, Fabric
After using the lputil command, the lpfc.conf file has the following entry.
File /kernel/drv/lpfc.conf
…………
…………
# BEGIN: LPUTIL-managed Persistent Bindings
fcp-bind-WWNN="50a98000020088f7:lpfc0t1";
Edit entries in /kernel/drv/sd.conf
The LUN IDs that were used when mapping the LUNs to the initiator groups and the SCSI ID that was
used with the lputil command have to be used in the entries in /kernel/drv/sd.conf.
…………
…………
name="sd" parent="lpfc" target=1 lun=0;
name="sd" parent="lpfc" target=1 lun=1;
name="sd" parent="lpfc" target=1 lun=2;
name="sd" parent="lpfc" target=1 lun=3;
name="sd" parent="lpfc" target=1 lun=4;
name="sd" parent="lpfc" target=1 lun=5;
name="sd" parent="lpfc" target=1 lun=6;
name="sd" parent="lpfc" target=1 lun=7;
name="sd" parent="lpfc" target=1 lun=8;
name="sd" parent="lpfc" target=1 lun=9;

Reboot with reconfigure
After the new entries in /kernel/drv/sd.conf are edited, the host needs to be rebooted with the reconfigure option:
reboot -- -r

Configure new disks with format command
The new disks can now be configured with the format command.

VCS Adding node to the existing cluster

Adding the node to the existing cluster
Perform the tasks on one of the existing nodes in the cluster.
To add the new node to the existing cluster
1 Enter the command:
# haconf -makerw
2 Add the new system to the cluster:
# hasys -add east
3 Enter the following command:
# haconf -dump
4 Copy the main.cf file from an existing node to your new node:
# rcp /etc/VRTSvcs/conf/config/main.cf east:/etc/VRTSvcs/conf/config/
5 Start VCS on the new node:
# hastart
6 If necessary, modify any new system attributes.
7 Enter the command:
# haconf -dump -makero
Start VCS after adding the new node to the cluster and verify the cluster.
To start VCS and verify the cluster
1 From the new system, start VCS with the new system added to the cluster:
# hastart
2 Run the GAB configuration command on each node to verify that Port a and
Port h include the new node in the membership:
# /sbin/gabconfig -a
GAB Port Memberships
===================================
Port a gen a3640003 membership 012
Port h gen fd570002 membership 012
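You can also check overall cluster, system, and service group status from any node:

# hastatus -sum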

Solaris emc powerpath link problem

If you boot a Solaris host with all socal host adapters to storage
system volumes disconnected or dysfunctional, PowerPath will not
configure any socal host adapter paths. After physically restoring the
socal connections, run the following commands to restore the paths
in PowerPath:

On hosts running this OS, run these commands:

Solaris 7 and 8:
  devfsadm
  powercf -q
  powermt config

Solaris 2.6:
  drvconfig; disks; devlinks
  powercf -q
  powermt config

AIX configuring emc powerpath on bootdisk

This section describes the process for converting a system with AIX
installed on internal disks to boot from storage system logical
devices. The process first transfers a copy of the complete operating
system from an internal disk to logical devices on a storage system. It
then configures PowerPath so the root volume group takes advantage
of multipathing and failover capabilities. This is the recommended
process, as it allows you to revert to the internal disks in the event of
a problem.
Before you start:
  • Ensure that the AIX alt_disk_install LPP is installed on the system. The LPP is on the AIX installation CD.
  • Apply the rte and boot_images filesets.
Then follow these steps:
1. Ensure that all device connections to the storage system are established.
2. Ensure that all hdisks are configured properly.
3. Run powermt config.
4. Use the rmdev command with the -d option to remove all PowerPath devices, including the powerpath0 device. PowerPath should remain installed, but all PowerPath devices must be deleted.
5. Run lsdev -Ct power. No devices should be listed in the output.
6. Determine which hdisks on the storage system will receive the copy of the operating system.
7. Run alt_disk_install -C hdisk_list to create the copy on the storage system hdisk(s).
8. Reboot the system. The system should boot using the hdisks specified in the previous step.
9. Run powermt config.
10. Run bootlist -m normal -o to determine which hdisk is in the bootlist.
11. Use powermt to determine which hdiskpower contains the hdisk in the boot list.
12. Use the bootlist command to include all the path hdisks for the hdiskpower found in the previous step.
13. Run pprootdev on.
14. Reboot the system. When the system comes up, rootvg should be using hdiskpower devices.
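A quick check after the final reboot (standard AIX command); the physical volumes listed for rootvg should now be hdiskpower devices:

# lsvg -p rootvg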

Tuesday, May 27, 2008

Solaris vx removing duplicate devices from vxdisk list

vxdisk list
DEVICE TYPE DISK GROUP STATUS
c7t21d0s2 sliced disk01 oradg online
c7t22d0s2 sliced disk02 oradg error
c7t22d0s2 sliced - - error
c7t23d0s2 sliced disk03 oradg online

vxdg -g oradg rmdisk disk02
vxdisk rm c7t22d0s2
vxdisk rm c7t22d0s2
devfsadm -C
vxdctl enable
vxdisk list

DEVICE TYPE DISK GROUP STATUS
c7t21d0s2 sliced disk01 oradg online
c7t22d0s2 sliced disk02 oradg online
c7t23d0s2 sliced disk03 oradg online

Details:
This specific procedure must be used when replacing one of the internal fibre drives within the following servers and/or arrays:

Sun Fire 280R, V480, and V880.
SENA A5X00 Arrays.

Note: Failure to follow this procedure could result in a duplicate device entry for the replaced disk in Volume Manager. This is most notable when running a vxdisk list command.

Example:

# vxdisk list
DEVICE TYPE DISK GROUP STATUS
c1t0d0s2 sliced rootdisk rootdg online
c1t1d0s2 sliced - - error
c1t1d0s2 sliced - - error


1. Select vxdiskadm option 4 - Select the Volume Manager disk to be replaced

2. luxadm -e offline - detach ssd instance

Use luxadm to get this disk out of the Solaris kernel configuration. The device path should end in ",raw" (for example, pci@1f,0/ide@d/dad@0,0:a,raw). This is the path from the /devices directory, not /dev/rdsk/c?t?d?s?.

* If the disk is multipathed, run luxadm -e offline on the second path as well (a consolidated command sketch follows step 7)

3. devfsadm -C

The -C option cleans up the /dev directory, and removes any lingering logical links to the device link names. It should remove all the device paths for this particular disk. This can be verified with:

# ls -ld /dev/dsk/c1t1d* - This should return no device entries for c1t1d*.


4. The drive can now be pulled physically

5. luxadm insert_device

This is an interactive command. It will go through the steps to insert the new device and create the necessary entries in the Solaris device tree.

6. vxdctl enable

This is for Volume Manager to rescan the disks. It should pick up the new disk with an "error" status. If not in error, the disk might contain some Volume Manager information, and might need to be formatted.

7. Select vxdiskadm option 5

This will start the recovery process (if needed).
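A hedged end-to-end sketch of the same procedure; c1t1d0 and the /devices path below are placeholders for the disk actually being replaced:

vxdiskadm                           # option 4: mark the VxVM disk for replacement
luxadm -e offline /devices/pci@8,600000/SUNW,qlc@2/fp@0,0/ssd@w2100002037e3d688,0:c,raw
                                    # repeat for the second path if the disk is multipathed
devfsadm -C                         # clean up stale /dev links
ls -ld /dev/dsk/c1t1d*              # should return nothing
# ... physically swap the drive, then:
luxadm insert_device                # interactive; creates the new device entries
vxdctl enable                       # VxVM rescans; the new disk should show "error"
vxdiskadm                           # option 5: replace the failed disk and start recovery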

Thursday, May 22, 2008

Solaris 220R init 6 hang

If you run init 6 on a 220R and it hangs, forcing a hard reset to reboot the system, check the CPU placement: this happens when the system has a single CPU installed in slot/module 2. Move the CPU to slot/module 0 to solve the issue.
The example below shows a 220R with 2 CPUs for illustration:
:~$ /usr/platform/sun4u/sbin/prtdiag
System Configuration: Sun Microsystems sun4u Sun Enterprise 220R (2 X
UltraSPARC-II 450MHz)
System clock frequency: 113 MHz
Memory size: 1024 Megabytes

============CPUs ===========

Run Ecache CPU CPU
Brd CPU Module MHz MB Impl. Mask
--- --- ------- ----- ------ ------ ----
0 0 0 450 4.0 US-II 10.0
0 2 2 450 4.0 US-II 10.0

Solaris ipmp example

Solaris IP Multipathing (IPMP) provides Ethernet/IP-layer redundancy without support from the switch side.
It can run in an active/standby configuration (more compatible; only a single IP is presented to the outside world) or an active/active configuration (outbound traffic can go over both NICs using 2 IPs; inbound traffic depends on which IP the client sends back to, so typically only 1 NIC is used). A quick failover test is sketched after the example files below.


hostname.ce0 (main active interface) ::
oaprod1-ce0 netmask + broadcast + deprecated -failover \
group oaprod_ipmp up \
addif oaprod1 netmask + broadcast + up

hostname.ce2 (active-standby config) ::
oaprod1-ce2 netmask + broadcast + deprecated -failover \
standby group oaprod_ipmp up
^^^^^^^

hostname.ce2 (active-active config) ::
oaprod1-ce2 netmask + broadcast + deprecated -failover \
group oaprod_ipmp up \
addif oaprod-nic2 netmask + broadcast + up

/etc/inet/hosts ::
172.27.3.71 oaprod1
172.27.3.72 oaprod1-ce0
172.27.3.73 oaprod1-ce2
172.27.3.74 oaprod2-nic2
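To sanity-check the group, a hedged failover test using the interface names from this example (if_mpadm is part of Solaris IPMP):

ifconfig -a          # ce0 and ce2 should both show "groupname oaprod_ipmp"
if_mpadm -d ce0      # detach ce0: data addresses should fail over to ce2
if_mpadm -r ce0      # reattach ce0: addresses fail back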

Solaris bad superblock corrupt recovery

Find alternate superblocks

# newfs -N /dev/rdsk/c0t3d0s7
/dev/rdsk/c0t3d0s7: 163944 sectors in 506 cylinders of 9 tracks, 36 sectors
83.9MB in 32 cyl groups (16 c/g, 2.65MB/g, 1216 i/g)
super-block backups (for fsck -b #) at:
32, 5264, 10496, 15728, 20960, 26192, 31424, 36656, 41888,
47120, 52352, 57584, 62816, 68048, 73280, 78512, 82976, 88208,
93440, 98672, 103904, 109136, 114368, 119600, 124832, 130064, 135296,
140528, 145760, 150992, 156224, 161456,
Then, run
# fsck -F ufs -o b=5264 /dev/rdsk/c0t3d0s7
Alternate superblock location: 5264.
** /dev/rdsk/c0t3d0s7
** Last Mounted on
** Phase 1 - Check Blocks and Sizes
** Phase 2 - Check Pathnames
** Phase 3 - Check Connectivity
** Phase 4 - Check Reference Counts
** Phase 5 - Check Cyl groups
36 files, 867 used, 75712 free (16 frags, 9462 blocks, 0.0% fragmentation)
/dev/rdsk/c0t3d0s7 FILE SYSTEM STATE SET TO OKAY

***** FILE SYSTEM WAS MODIFIED *****
#
On Solaris 9, if the superblock in the root (/) file system becomes damaged and you cannot restore it, you have two choices:

1. Reinstall the system.
2. Boot from the network or a local CD and attempt the steps above. If these steps fail, recreate the root (/) file system with the newfs command and restore it from a backup copy (a hedged sketch follows).
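A minimal sketch of choice 2, assuming the root slice is c0t0d0s0 and the backup sits on tape /dev/rmt/0 (both hypothetical):

ok boot cdrom -s                              # single-user shell from the install CD
fsck -F ufs -o b=32 /dev/rdsk/c0t0d0s0        # try an alternate superblock first
newfs /dev/rdsk/c0t0d0s0                      # last resort: recreate the file system
mount /dev/dsk/c0t0d0s0 /a                    # /a exists in the install miniroot
cd /a; ufsrestore rvf /dev/rmt/0              # restore root from the backup
installboot /usr/platform/`uname -i`/lib/fs/ufs/bootblk /dev/rdsk/c0t0d0s0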

AIX superblock corrupt

AIX has 2 superblocks: one in logical block 1 and a copy in logical block 31.
Run the command below to copy block 31 over block 1:
#dd count=1 bs=4k skip=31 seek=1 if=/dev/hd4 of=/dev/hd4

Wednesday, May 21, 2008

Aix boot fails JFS/JFS2 log corrupt

If the LED code is 551, 552, 554, 555, 556 or 557:
Try accessing the rootvg file systems before mounting them, re-form the log with #logform -V jfs /dev/hd8 (or #logform -V jfs2 against the JFS2 log logical volume), and run fsck afterwards:
#fsck -y -V jfs /dev/hd1
#fsck -y -V jfs /dev/hd2
#fsck -y -V jfs /dev/hd3
#fsck -y -V jfs /dev/hd4
#fsck -y -V jfs /dev/hd9var
#fsck -y -V jfs /dev/hd10opt
Do the same (with -V jfs2) for any JFS2 file systems,
then #exit

** Note: re-forming the log means previous transactions in the log will be lost

AIX fix corrupted boot logical volume

Boot from cdrom or NIM (F1 or #1 to set SMS options)
>Maintenance
>> 1 Access a Root Volume Group
>>> select hd5
#bosboot -ad /dev/hdisk0
#shutdown -Fr

If you ever need to recreate hd5, then:
Boot from cdrom or NIM (F1 or #1 to set SMS options)
>Maintenance
>> 1 Access a Root Volume Group
>>> select hd5
#rmlv hd5
#chpv -c hdisk0
#mklv -y hd5 -t boot -a e rootvg 1
#bosboot -ad /dev/hdisk0
#bootlist -m normal hdisk0
#sync
#sync
#shutdown -Fr

AIX data not managed by ODM

These are files not managed by ODM
/etc/filesystems
/etc/security
/etc/passwd
/etc/qconfig

Tuesday, May 20, 2008

AIX restart sendmail

This starts sendmail on AIX
startsrc -s sendmail -a "-bd -q30m"

/etc/mail
# more sendmail.pid
123468
sendmail -bd -q30m
# ps -ef | grep sendmail
root 123468 1 0 Oct 26 - 3:15 sendmail: accepting connections
smmsp 283660 14700 0 Oct 26 - 0:00 /usr/lib/sendmail
root 304634 273842 1 23:54:43 pts/1 0:00 grep sendmail
# kill -15 `head -1 /etc/mail/sendmail.pid`
# ps -ef | grep sendmail
root 123494 273842 1 23:59:56 pts/1 0:00 grep sendmail
smmsp 283660 14700 0 Oct 26 - 0:00 /usr/lib/sendmail
# sendmail -bd -q30m
# ps -ef | grep sendmail

root 274584 273842 1 00:00:38 pts/1 0:00 grep sendmail
smmsp 283660 14700 0 Oct 26 - 0:00 /usr/lib/sendmail
root 318830 1 0 00:00:32 - 0:00 sendmail: accepting connections

# ps -ef | grep sendmail
root 274586 273842 1 00:00:49 pts/1 0:00 grep sendmail
smmsp 283660 14700 0 Oct 26 - 0:00 /usr/lib/sendmail
root 318830 1 0 00:00:32 - 0:00 sendmail: accepting connections

# ls -l sendmail.pid
-rw------- 1 root 1586 26 Dec 03 00:00 sendmail.pid
# more sendmail.pid
318830
sendmail -bd -q30m

Sendmail uuencode attachment

Use this example to send an email attachment via mailx:
#uuencode /content/05070000001.pdf 05070000001.pdf | mailx -v -s 'SubjectLine' you@yahoo.com

AWK by example

# count lines (emulates "wc -l")
awk 'END{print NR}'

# print the sums of the fields of every line
awk '{s=0; for (i=1; i<=NF; i++) s=s+$i; print s}'

# add all fields in all lines and print the sum
awk '{for (i=1; i<=NF; i++) s=s+$i}; END{print s}'

# print the total number of fields ("words") in all lines
awk '{ total = total + NF }; END {print total}' file

# print every line where the value of the last field is > 4
awk '$NF > 4'

# substitute "foo" with "bar" ONLY for lines which contain "baz"
awk '/baz/{gsub(/foo/, "bar")};{print}'

# substitute "foo" with "bar" EXCEPT for lines which contain "baz"
awk '!/baz/{gsub(/foo/, "bar")};{print}'

# switch the first 2 fields of every line
awk '{temp = $1; $1 = $2; $2 = temp}' file

# print line number 52
awk 'NR==52'
awk 'NR==52 {print;exit}' # more efficient on large files

# print section of file between two regular expressions (inclusive)
awk '/Iowa/,/Montana/' # case sensitive

# delete ALL blank lines from a file (same as "grep '.' ")
awk NF
awk '/./'

SED by example

# substitute (find and replace) "foo" with "bar" on each line
sed 's/foo/bar/' # replaces only 1st instance in a line
sed 's/foo/bar/4' # replaces only 4th instance in a line
sed 's/foo/bar/g' # replaces ALL instances in a line
sed 's/\(.*\)foo\(.*foo\)/\1bar\2/' # replace the next-to-last case
sed 's/\(.*\)foo/\1bar/' # replace only the last case

# substitute "foo" with "bar" ONLY for lines which contain "baz"
sed '/baz/s/foo/bar/g'

# substitute "foo" with "bar" EXCEPT for lines which contain "baz"
sed '/baz/!s/foo/bar/g'


# join pairs of lines side-by-side (like "paste")
sed '$!N;s/\n/ /'


# change "scarlet" or "ruby" or "puce" to "red"
sed 's/scarlet/red/g;s/ruby/red/g;s/puce/red/g' # most seds
gsed 's/scarlet\|ruby\|puce/red/g' # GNU sed only


# print only lines which match regular expression (emulates "grep")
sed -n '/regexp/p' # method 1
sed '/regexp/!d' # method 2


# print the line immediately before a regexp, but not the line
# containing the regexp
sed -n '/regexp/{g;1!p;};h'


# grep for AAA and BBB and CCC (in any order)
sed '/AAA/!d; /BBB/!d; /CCC/!d'

# grep for AAA and BBB and CCC (in that order)
sed '/AAA.*BBB.*CCC/!d'

# print section of file based on line numbers (lines 8-12, inclusive)
sed -n '8,12p' # method 1
sed '8,12!d' # method 2

# print line number 52
sed -n '52p' # method 1
sed '52!d' # method 2
sed '52q;d' # method 3, efficient on large files

# delete ALL blank lines from a file (same as "grep '.' ")
sed '/^$/d' # method 1
sed '/./!d' # method 2


Thursday, May 15, 2008

Solaris one liner backup

--> move a directory to another server
tar cf - ./games | rsh brucey cd /tmp\; tar xvBpf -
--> move a directory
tar cf - ./games | (cd /tmp; tar xvBpf - )
--> dump to zip
ufsdump 0f - /filesystem | /opt/local/gzip - > /tmp/dump.gz
--> backup one liner
tar cvf - /home/ebs | gzip - > ebs.tar.gz
--> encrypt filename 1 and output to 1.crypt file
crypt < 1 > 1.crypt ; rm 1
--> decrypt filename 1.crypt and stdout to screen
crypt < 1.crypt
--> clever way to archive
tar cvf - `find . -print` >/tmp/dumpfile.tar
--> selectively extract from a tar archive
tar xvf /tmp/iona.tar ./iona/.sh_history

Solaris mounting and sharing cdrom

restart volmgt daemon
# pkill vold && /usr/sbin/vold &

check which is the cdrom drive
% iostat -En

c1t0d0 Soft Errors: 149 Hard Errors: 0 Transport Errors: 0
Vendor: MATSHITA Product: CDRW/DVD UJDA740 Revision: 1.00 Serial No:
Size: 0.56GB <555350016>
Media Error: 0 Device Not Ready: 0 No Device: 0 Recoverable: 0
Illegal Request: 149 Predictive Failure Analysis: 0

mount the cdrom manually if vold fails to mount it
mount -F hsfs -o ro /dev/dsk/c1t0d0s2 /cdrom

nfs shares cdrom drive
edit /etc/dfs/dfstab and put in the line below, or just run it
share -F nfs -o ro /cdrom

run dfshares to see if the cdrom is shared
if not, run /etc/init.d/nfs.server stop and then /etc/init.d/nfs.server start

on the remote machine run
mount servername:/cdrom /mnt

if you want to umount /mnt, run these first
fuser -cu /mnt , to see the pids using /mnt
fuser -ck /mnt , to kill all processes holding the cdrom

Wednesday, May 14, 2008

Solaris American English Unicode (en_US.UTF-8) Full Locale

Beginning with Solaris 8, all Unicode locales benefit from Asian locale's native Asian input systems and methods. To use Asian native input methods in Unicode locales, you must install the Asian locales that you want to use. For example, if you want to use Japanese input systems in Unicode locales, you must install at least one of the available Japanese locales.

This locale should be already on your system if you've installed "End User System Support" meta-cluster or added an upper meta-cluster such as "Developer System Support" or "Entire Distribution" during the installation. To find out if you have the locale, run the command

% ls /usr/lib/locale/en_US.UTF-8/en_US.UTF-8.so.1
If the file is in the directory, the locale is in place. If the file is not in the directory, add the packages below (a pkgadd sketch follows the package list).
The packages in the two lists below can be found on the Solaris Software 1 of 2 CD in the directory .../Solaris_8/Product/:

SPARC platform

SUNWarrf SUNWeugrf SUNWi2rf SUNWi4rf SUNWi5rf SUNWi7rf SUNWi8rf SUNWi9rf SUNWi13rf SUNWi15rf SUNWtxfnt SUNW5xmft SUNWcxmft SUNWjxmft SUNWkxmft SUNWeudba SUNWeudbd SUNWeudda SUNWeudhr SUNWeudhs SUNWeudis SUNWeudiv SUNWeudlg SUNWeudmg SUNWeuezt SUNWeuluf SUNWeulux SUNWeuodf SUNWeusru SUNWeuxwe SUNWuiu8 SUNWuiu8x SUNWuium SUNWulcf SUNWulcfx SUNWulocf SUNWuxlcf SUNWuxlcx
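A hedged sketch of adding a few of these packages, assuming the Software 1 of 2 CD is mounted at /cdrom/cdrom0 (adjust the path to your mount point):

cd /cdrom/cdrom0/Solaris_8/Product
pkgadd -d . SUNWeudba SUNWeudbd SUNWuiu8 SUNWuiu8x SUNWuium   # repeat for the remaining packages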

Solaris check if patch is installed

/var/sadm/patch contains the list of installed patches. The creation date of a patch's directory is the date the patch was installed.

or

#showrev -p | grep 116268
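For example, a hedged check of the same patch ID via the patch directory (patchadd -p is an alternative listing of installed patches):

ls -d /var/sadm/patch/116268*   # directory creation date = install date
patchadd -p | grep 116268       # lists installed patches, like showrev -p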

Solaris vxfs verify large file support

how to verify and enable largefile support on a vxfs filesystem

Description

To verify if largefile support is enabled on a VXFS filesystem:

# fsadm -F vxfs /dir_name

If you need to enable largefile support:

# fsadm -F vxfs -o largefiles /dir_name

Example

fsadm -F vxfs /dir_name; fsadm -F vxfs -o largefiles /dir_name

Tuesday, May 13, 2008

HP-UX disk and filesystem tasks

Search for attached disk
ioscan -fnC disk
Initialize a disk for use with LVM
pvcreate -f /dev/rdsk/c0t1d0
Create the device structure needed for a new volume group.
cd /dev
mkdir vgdata
cd vgdata
mknod group c 64 0x010000
Create volume group vgdata
vgcreate vgdata /dev/dsk/c0t1d0
{ if you're expecting to use more than 16 physical disks, use the -p option; the range is 1 to 256 disks. }
Display volume group vgdata
vgdisplay -v vgdata
Add another disk to volume group
pvcreate -f /dev/rdsk/c0t4d0
vgextend vg01 /dev/dsk/c0t4d0
Remove disk from volume group
vgreduce vg01 /dev/dsk/c0t4d0
Create a 100 MB logical volume lvdata
lvcreate -L 100 -n lvdata vgdata
newfs -F vxfs /dev/vgdata/rlvdata
Extend logical volume to 200 MB
lvextend -L 200 /dev/vgdata/lvdata
Extend file system to 200 MB
{ if you don't have Online JFS installed, the volume must be unmounted before you can extend the file system. }
fuser -ku /dev/vgdata/lvdata { kill all processes that have open files on this volume. }
umount /dev/vgdata/lvdata
extendfs -F vxfs /dev/vgdata/rlvdata
mount /data

{ for Online JFS: 200 MB x 1024 = 204800 blocks of 1 KB }
fsadm -F vxfs -b 204800 /data
Set largefiles to support files greater than 2GB
fsadm -F vxfs -o largefiles /data

Exporting and Importing disks across system.

1. make the volume group unavailable
vgchange -a n /dev/vgdata
2. Export the disk while creating a logical volume map file.
vgexport -v -m data_map vgdata
3. Disconnect the drives and move to new system.
4. Move the data_map file to the new system.
5. On the new system recreate the volume group directory
mkdir /dev/vgdata
mknod /dev/vgdata/group c 64 0x020000
6. Import the disks to the new system
vgimport -v -m data_map /dev/vgdata /dev/dsk/c2t1d0 /dev/dsk/c2t2d0
7. Enable the new volume group
vgchange -a y /dev/vgdata
Renaming a logical volume
/dev/vgdata/lvol1 -> /dev/vgdata/data_lv
umount /dev/vgdata/lvol1
ll /dev/vgdata/lvol1 { take note of the minor number, e.g. 0x010001 }
brw-r----- 1 root root 64 0x010001 Dec 31 17:59 lvol1
mknod /dev/vgdata/data_lv b 64 0x010001 { create the new logical volume name }
mknod /dev/vgdata/rdata_lv c 64 0x010001
vi /etc/fstab { reflect the new logical volume }
mount -a
rmsf /dev/vgdata/lvol1
rmsf /dev/vgdata/rlvol1

Solaris which pid uses port

#!/bin/ksh
# find from a port the pid that started the port
#
line='-------------------------------------------------------------------------'
pids=`/usr/bin/ps -ef | sed 1d | awk '{print $2}'`

# Prompt users or use 1st cmdline argument
if [ $# -eq 0 ]; then
    read ans?"Enter port you like to know pid for: "
else
    ans=$1
fi

# Check all pids for this port, then list that process
for f in $pids
do
    /usr/proc/bin/pfiles $f 2>/dev/null | /usr/xpg4/bin/grep -q "port: $ans"
    if [ $? -eq 0 ] ; then
        echo "$line\nPort: $ans is being used by PID: \c"
        /usr/bin/ps -o pid -o args -p $f | sed 1d
    fi
done
exit 0
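Usage sketch (the file name port2pid.sh is just an assumption; save the script under any name):

chmod +x port2pid.sh
./port2pid.sh 22        # or run with no argument and type the port at the prompt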

AIX HPUX how to remove password

To remove password in AIX,
Edit /etc/security/passwd, change the password to *
myaot2:
password = *

To remove password requirements on HP-UX (trusted system), edit the file /tcb/files/auth/o/oracle:

oracle:u_name=oracle:u_id#600:\
:u_pwd=*:\
:u_auditid#67:\
:u_auditflag#1:\
:u_exp#3024000:u_life#4838400:u_succhg#1150751962:u_unsucchg#1150691460:\
:u_pw_expire_warning#604800:u_pswduser=oracle:u_nullpw:u_pwchanger=root:\
:u_suclog#1150752396:u_suctty=/dev/pts/1:u_unsuclog#1150692078:u_unsuctty=ssh:\
:u_lock@:chkent:

Change the u_pwd field to u_pwd=*:\

You also have to disable password aging for the specific account in SAM; password access is then revoked without locking the account.

AIX print queue command

To Check AIX Print Queue

#qchk -q -P HOUOSP0P350

Queue Dev Status Job Files User PP % Blks Cp Rnk

------- ----- --------- --- ------------------ ---------- ---- -- ----- --- ---

HOUOSP0 @houh READY

HOUOSP0P350:


PRINTERS / PRINT QUEUES
--------------------------------------------------------------------------------

splp (device) Displays/changes printer driver settings
splp /dev/lp0

export LPDEST="pqname" Set default printer queue for login session

lsvirprt Lists/changes virtual printer attributes.

lsallq Displays all queues

rmvirprt -q queuename -d queuedevice Removes a virtual printer

qpri -#(job No) -a(new priority) Change a queue job priority.

qhld -#(job No) Put a hold on a job
qhld -r #(job No) Release a held job

qchk -A Status of jobs in queues
lpstat
lpstat -p(queue) Status of jobs in a named queue

qcan -x (job No) Cancel a job from a queue
cancel (job No)

enq -U -P(queue) Enable a queue
enable (queue)

enq -D -P(queue) Disable a queue
disable (queue)

qmov -m(new queue) -#(job No) Move a job to another queue

startsrc -s qdaemon Start qdaemon sub-system
lssrc -s qdaemon List status of qdaemon sub-system
stopsrc -s qdaemon Stop qdaemon sub-system

Solaris find sun fibre channel device
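The saved listing below comes from luxadm probe with the -p option, which adds the physical device paths (output redirected to a file here; the file path is arbitrary):

luxadm probe -p > /tmp/luxadm_probe_-p.out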

# more luxadm_probe_-p.out

No Network Array enclosures found in /dev/es

Found Fibre Channel device(s):

Node WWN:5005076300c09f4b Device Type:Disk device
Logical Path:/dev/rdsk/c6t6005076300C09F4B000000000000100Dd0s2
Physical Path:
/devices/scsi_vhci/ssd@g6005076300c09f4b000000000000100d:c,raw
Node WWN:5005076300c09f4b Device Type:Disk device
Logical Path:/dev/rdsk/c6t6005076300C09F4B000000000000102Cd0s2
Physical Path:
/devices/scsi_vhci/ssd@g6005076300c09f4b000000000000102c:c,raw
Node WWN:5005076300c09f4b Device Type:Disk device
Logical Path:/dev/rdsk/c6t6005076300C09F4B000000000000102Ed0s2
Physical Path:
/devices/scsi_vhci/ssd@g6005076300c09f4b000000000000102e:c,raw

# format

Searching for disks...done
AVAILABLE DISK SELECTIONS:

0. c0t0d0
/pci@1f,4000/scsi@3/sd@0,0
1. c0t1d0
/pci@1f,4000/scsi@3/sd@1,0
2. c0t2d0
/pci@1f,4000/scsi@3/sd@2,0
3. c0t3d0
/pci@1f,4000/scsi@3/sd@3,0
4. c2t0d0
/pci@6,4000/scsi@4/sd@0,0
5. c2t1d0
/pci@6,4000/scsi@4/sd@1,0
6. c2t2d0
/pci@6,4000/scsi@4/sd@2,0
7. c2t3d0
/pci@6,4000/scsi@4/sd@3,0
8. c6t6005076300C09F4B000000000000100Dd0
/scsi_vhci/ssd@g6005076300c09f4b000000000000100d

9. c6t6005076300C09F4B000000000000102Cd0
/scsi_vhci/ssd@g6005076300c09f4b000000000000102c

10. c6t6005076300C09F4B000000000000102Ed0
/scsi_vhci/ssd@g6005076300c09f4b000000000000102e


Specify disk (enter its number): ^D

UNIX: How to print column nicely using printf

[user@hostfwnms1-oam tmp]# cat b.sh
printf "%-26s %-19s %-8s %-8s %-s %-s\n" HOSTNAME IP PING SNMPWALK 0-ok 1-fail
for i in `cat n...