# svcs | grep auto
legacy_run Oct_24 lrc:/etc/rc2_d/S72autoinstall
online 17:31:10 svc:/system/filesystem/autofs:default
To stop:
# svcadm -v disable svc:/system/filesystem/autofs:default
To start:
# svcadm -v enable svc:/system/filesystem/autofs:default
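To confirm the state afterwards, you can query the FMRI directly (a quick check, not part of the original note):
# svcs svc:/system/filesystem/autofs:default
It should report the service as disabled or online accordingly.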
Sunday, September 21, 2008
Friday, September 19, 2008
HPUX GSP toggle between console and command mode
CO : COnsole - leave command mode and return to console mode
This command exits the GSP command interface, and connects to the system console. All mirrored users are switched. Type CTRL-B to return to the GSP command interface.
HPUX GSP Console get write access
Typing "CO" from the GSP Command interface provides a mirrored version of the OS console. All mirrored users see the same output. At any time, only one of the mirrored users has write access to the console. To get write access to the console, type CTRL-e c f (not CTRL-e CTRL-c CTRL-f).
SEE ALSO: CO (COnsole)
Solaris when LUN dynamically removed JNI
When the driver dynamically removes a LUN, not all references to that
LUN are removed. This is because the file system nodes in the
/dev/dsk and /dev/rmt directories are scanned to show the devices.
However, when devices are removed dynamically via EZ Fibre or the
jnic146x_update_drv command, only the kernel device nodes are
removed. To remove the actual file system nodes, execute the
following command:
#devfsadm -C -v
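A rough way to confirm the cleanup worked (not from the original note) is to count the /dev/dsk entries before and after:
# ls /dev/dsk | wc -l
# devfsadm -C -v
# ls /dev/dsk | wc -l
The count should drop once the dangling links for the removed LUN are gone.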
Solaris save format output to file
# echo|format > format.out
This is useful when you have a new disk/LUN and don't want to hunt manually for which one is new.
It also makes it easy when you want to run a command like the one below:
#metattach d017 /dev/dsk/c6t60060E8004F25E000000F25E000009E1d0
root@hostorap1 # grep -i warning /var/adm/messages
Sep 18 10:07:28 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e00000623 (ssd34):
Sep 18 10:07:28 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e00000622 (ssd35):
Sep 18 10:07:28 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e00000621 (ssd36):
Sep 18 10:07:28 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e00000620 (ssd37):
Sep 18 10:07:28 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e0000061f (ssd38):
Sep 18 10:07:28 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e0000061e (ssd39):
Sep 18 10:07:29 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e0000061d (ssd40):
Sep 18 10:07:29 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e0000061c (ssd41):
Sep 18 10:07:29 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e0000061b (ssd42):
Sep 18 10:07:29 hostorap1 scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e0000061a (ssd43):
Sep 18 10:07:29 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e00000619 (ssd44):
Sep 18 10:07:29 host scsi: [ID 107833 kern.warning] WARNING: /scsi_vhci/ssd@g60060e8004f25e000000f25e00000618
root@host # more format.out
Searching for disks...done
c6t60060E8004F25E000000F25E000009E1d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E000009E2d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E000009E3d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E000009E4d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E0000061Ad0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E0000061Bd0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E0000061Cd0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E0000061Dd0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E0000061Ed0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E0000061Fd0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E00000618d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E00000619d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E00000620d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E00000621d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E00000622d0: configured with capacity of 25.28GB
c6t60060E8004F25E000000F25E00000623d0: configured with capacity of 25.28GB
AVAILABLE DISK SELECTIONS:
0. c0t0d0
/ssm@0,0/pci@18,600000/pci@2/scsi@2/sd@0,0
1. c1t0d0
/ssm@0,0/pci@1c,600000/pci@2/scsi@2,1/sd@0,0
2. c6t60060E8004F25E000000F25E00000DE8d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000de8
3. c6t60060E8004F25E000000F25E00000DE9d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000de9
4. c6t60060E8004F25E000000F25E00000DEAd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000dea
5. c6t60060E8004F25E000000F25E00000DEBd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000deb
6. c6t60060E8004F25E000000F25E00000EA0d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea0
7. c6t60060E8004F25E000000F25E00000EA1d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea1
8. c6t60060E8004F25E000000F25E00000EA2d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea2
9. c6t60060E8004F25E000000F25E00000EA3d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea3
10. c6t60060E8004F25E000000F25E00000EA4d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea4
11. c6t60060E8004F25E000000F25E00000EA5d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea5
12. c6t60060E8004F25E000000F25E00000EA6d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea6
13. c6t60060E8004F25E000000F25E00000EA7d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000ea7
14. c6t60060E8004F25E000000F25E000004A4d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000004a4
15. c6t60060E8004F25E000000F25E000004A5d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000004a5
16. c6t60060E8004F25E000000F25E000004A6d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000004a6
17. c6t60060E8004F25E000000F25E000004A7d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000004a7
18. c6t60060E8004F25E000000F25E000009E1d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000009e1
19. c6t60060E8004F25E000000F25E000009E2d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000009e2
20. c6t60060E8004F25E000000F25E000009E3d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000009e3
21. c6t60060E8004F25E000000F25E000009E4d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e000009e4
22. c6t60060E8004F25E000000F25E0000037Ad0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000037a
23. c6t60060E8004F25E000000F25E0000037Bd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000037b
24. c6t60060E8004F25E000000F25E0000037Cd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000037c
25. c6t60060E8004F25E000000F25E0000037Dd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000037d
26. c6t60060E8004F25E000000F25E0000037Ed0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000037e
27. c6t60060E8004F25E000000F25E0000037Fd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000037f
28. c6t60060E8004F25E000000F25E0000061Ad0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000061a
29. c6t60060E8004F25E000000F25E0000061Bd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000061b
30. c6t60060E8004F25E000000F25E0000061Cd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000061c
31. c6t60060E8004F25E000000F25E0000061Dd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000061d
32. c6t60060E8004F25E000000F25E0000061Ed0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000061e
33. c6t60060E8004F25E000000F25E0000061Fd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000061f
34. c6t60060E8004F25E000000F25E0000097Ad0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000097a
35. c6t60060E8004F25E000000F25E0000097Bd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000097b
36. c6t60060E8004F25E000000F25E0000097Cd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000097c
37. c6t60060E8004F25E000000F25E0000097Dd0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000097d
38. c6t60060E8004F25E000000F25E0000097Ed0
/scsi_vhci/ssd@g60060e8004f25e000000f25e0000097e
39. c6t60060E8004F25E000000F25E00000378d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000378
40. c6t60060E8004F25E000000F25E00000379d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000379
41. c6t60060E8004F25E000000F25E00000380d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000380
42. c6t60060E8004F25E000000F25E00000381d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000381
43. c6t60060E8004F25E000000F25E00000382d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000382
44. c6t60060E8004F25E000000F25E00000383d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000383
45. c6t60060E8004F25E000000F25E00000618d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000618
46. c6t60060E8004F25E000000F25E00000619d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000619
47. c6t60060E8004F25E000000F25E00000620d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000620
48. c6t60060E8004F25E000000F25E00000621d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000621
49. c6t60060E8004F25E000000F25E00000622d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000622
50. c6t60060E8004F25E000000F25E00000623d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000623
51. c6t60060E8004F25E000000F25E00000979d0
/scsi_vhci/ssd@g60060e8004f25e000000f25e00000979
Specify disk (enter its number):
root@host #
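A variation on the same idea (just a sketch, not part of the original post): capture the format output before and after the new LUNs are presented, then diff the two files so the new devices stand out.
# echo | format > format.before
...present the new LUNs and rescan...
# echo | format > format.after
# diff format.before format.after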
Solaris replicate copy disk structure partition
Make sure the disks have the same geometry.
Useful if you want to add a new disk and don't want to repartition it by hand.
# prtvtoc /dev/rdsk/c?t?d?s2 | fmthard -s - /dev/rdsk/c?t?d?s2
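For example, to copy the label from a source disk to a new target disk and confirm the result (device names here are placeholders):
# prtvtoc /dev/rdsk/c1t0d0s2 | fmthard -s - /dev/rdsk/c1t1d0s2
# prtvtoc /dev/rdsk/c1t1d0s2
The first device is the source that gets read; the second is the target whose VTOC gets overwritten, so double-check the order.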
Thursday, September 18, 2008
HPUX 9000/800/rp3440 how to power off and on
1- Log in to the OS as root and issue #init 0
2- Log in to the GSP
Disable all types of remote access (see SA command)
*************************************************************************
*************************************************************************
Your Certificate is expired.
Use the SO command to generate a new certificate.
*************************************************************************
MP MAIN MENU:
CO: Console
VFP: Virtual Front Panel
CM: Command Menu
CL: Console Log
SL: Show Event Logs
HE: Main Help Menu
X: Exit Connection
[Server-rib] MP> CM
[Server-rib] MP:CM> pc -off
PC -off
System will be powered off.
You must shut down the OS manually before this command is executed.
Failure to do this can cause problems when the OS is restarted.
Confirm? (Y/[N]): y
y
-> System is being powered off.
-> Command successful.
[Server-rib] MP:CM> PC -on
Usage: PC [ -on | -off | -cycle ] [ -nc ]
PC -?
Wednesday, September 17, 2008
Solaris find out what type of lun hardware and command to check it out
HDS #lunstat
format output
10. c4t98d6 DGC-RAID5-0219 cyl 40958 alt 2 hd 128 sec 10
/pci@9,600000/lpfc@1/sd@62,6
HP XP #xpinfo
format output
10. c2t4d10 HP-OPEN-E-SUN-2108 cyl 19757 alt 2 hd 15 sec 96
/pci@3,2000/fibre-channel@2/sd@4,a
To detect new LUNs:
#cfgadm -al -o show_SCSI_LUN
#devfsadm
check the messages file for corrupt label warnings; those are the new disks/LUNs
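A quick way to spot those warnings (a sketch, not part of the original note):
# grep -i 'corrupt label' /var/adm/messages
The devices flagged with a corrupt-label warning are the freshly presented, still unlabeled LUNs.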
JNI
configuration files (in /kernel/drv/):
- jnic146x.conf
- jnic.conf
- fcaw.conf
- fca-pci.conf
FCode diag commands (from OBP):
- fce-test - tests the Emerald chip
- loop-init - verifies the loop
- fcd-loop - verifies the loop on a dual-port adapter
- prt-cfg - configures the adapter's port
- set-bootn-wwn - sets the WWN of the boot disk
How to verify firmware level (from Solaris)
#prtconf -cp |grep fcode
How to verify presence of adapters in os
#prtconf -v |grep -i JNI
#prtconf -v |grep -i FCA
#modinfo | grep -i jni
---------------------------------------------------------------------------------------
Storage Systems
* Hitachi USP V, USP, NSC, AMS, WMS Series
* Hitachi Lightning™ 9900 and 9900 V Series
* Hitachi Thunder™ 9200 and 9500 V Series
* EMC Symmetrix™ 4.8, 5.0, 5.5
* EMC DMX 800, 1000, 2000, 3000
* EMC CLARiiON™ CX-Series, FC-4700
* HP XP Series 48, 128, 512, 1024, 10000, 12000
* HP EVA 3000, 4000, 5000, 6000, 8000
* HP MSA 1000, 1500
* HP HSG
* HP NAS, All-in-One Storage NAS
* HP EFS Clustered NAS Gateway
* HP ESL, EML tape libraries
* IBM DS4000 Series (Formerly FAStT Series)
* IBM DS4500, DS4800
* IBM ESS 800, F20
* IBM DS6000, DS6800, DS8000, DS8100, DS8300
* IBM 3581, 3582, 3583, and 3584 tape libraries
* LSI Engenio E2600, E4600, E5600, E6000
* NetApp FAS, V-Series, NearStore with Data ONTAP 6.5-7.1
* 3PAR InServ
* SGI InfiniteStorage TP Series
* Sun StorageTek 9900, 9985, 9990
* Sun StorageTek 3510, 3511, 6130, 6140, 6540, 6920, 6940
* Sun StorageTek NAS 5210, 5310, 5320
* Sun (Formerly StorageTek) Flexline™ 200/300 Series
* Xiotech Magnitude 3D
Host Bus Adapters (HBAs)
Various HBA series and models from each of the following HBA vendors:
* Emulex
* HP
* IBM
* AMCC (Formerly JNI)
* LSI
* Qlogic
* Sun
Solaris 10 patching zones
How to Apply a Patch to the Global Zone Only
global# patchadd -G patch_id
How to Apply a Patch to the Global Zone and All Non-Global Zones
global# patchadd patch_id
How to Apply a Patch to a Specified Non-Global Zone Only
To apply a patch to a specified non-global zone only,
the SUNW_PKG_ALLZONES package parameter for all packages
in the patch set must be set to false.
local-zone# patchadd patch_id
How to Remove a Patch From the Global Zone and All Non-Global Zones
global# patchrm patch_id
How to Remove a Patch From a Specified Non-Global Zone Only
To remove a patch from a specified non-global zone only,
the SUNW_PKG_ALLZONES package parameter for all packages
in the patch set must be set to false.
my-zone# patchrm patch_id
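To check how a package is marked (SUNWfoo below is just a placeholder name), query the installed package with pkgparam:
# pkgparam SUNWfoo SUNW_PKG_ALLZONES
false
If any package in the patch reports true, the patch cannot be applied to or removed from a single non-global zone.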
Solaris 10 How to Add a Package
How to Add a Package to a Specified Non-Global Zone Only
local-zone# pkgadd -d /dir package_name
How to Add a Package to the Global Zone and All Non-Global Zones
global# pkgadd -d /dir package_name
How to Add a Package to the Global Zone Only
global# pkgadd -d /dir -G package_name
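To confirm where the package ended up (zone and package names below are placeholders), pkginfo can be run in the global zone and, via zlogin, inside a non-global zone:
global# pkginfo -l SUNWfoo
global# zlogin myzone pkginfo -l SUNWfoo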
Solaris 10 modify the network configuration of a running zone
To add:
global# ifconfig bge0 addif 192.168.200.202 zone myzone
To remove:
global# ifconfig bge0 removeif 192.168.200.202
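Note that an address added or removed with ifconfig does not survive a zone reboot. To make the change permanent, the interface has to go into the zone configuration (zone name myzone is a placeholder); a sketch:
global# zonecfg -z myzone
zonecfg:myzone> add net
zonecfg:myzone:net> set address=192.168.200.202
zonecfg:myzone:net> set physical=bge0
zonecfg:myzone:net> end
zonecfg:myzone> commit
zonecfg:myzone> exit
The entry takes effect the next time the zone is booted.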
Tuesday, September 16, 2008
Solaris 10 login to zone console
login to global zone
# zoneadm list -vc
ID NAME STATUS PATH
0 global running /
1 zone1 running /zone/1
# zlogin -C -e\@ zone1
[Connected to zone 'zone1' console]
zone1 console login:
zone1 console login:
zone1 console login: @.
[Connection to zone 'zone1' console closed]
#
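For a regular shell login instead of the console, drop the -C flag:
# zlogin zone1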
VXVM: quickly mirroring an empty volume
Use the command below to create an empty mirrored volume:
# vxassist make newvol 10m layout=concat-mirror init=active disk1 disk2
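Because init=active marks the plexes active without an initial resync (safe only because the volume is empty), the volume is usable immediately. To confirm both plexes are attached (disk group name dg1 is a placeholder):
# vxprint -g dg1 -ht newvol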
Friday, September 12, 2008
Solaris v440 replace failed power supply
You need to log in to the System Controller:
sc>
SC Alert: PSU @ PS1 has FAILED.
sc> removefru PS1
Are you sure you want to remove PS1 [y/n]? y
sc>
SC Alert: PSU @ PS1 has FAILED.
SC Alert: PSU @ PS1 has FAILED.
SC Alert: PSU @ PS1 has FAILED.
SC Alert: PSU @ PS1 has been removed.
SC Alert: Required PSU @ PS1 is not present.
SC Alert: PSU @ PS1 has been inserted.
sc> poweron PS1
sc>
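Afterwards, the status of the power supplies can be checked from the same prompt (a quick check, not part of the original capture):
sc> showenvironment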
Monday, September 8, 2008
HPUX How to check if a patch has been installed and install the patch
Let's say we want to check for these two patches:
phkl_36745 and phne_37489
# swlist -l patch | grep -i phkl_36745
PHKL_36745.LVM-KRN 1.0 LVM.LVM-KRN applied
PHKL_36745.CORE2-KRN 1.0 OS-Core.CORE2-KRN applied
# PHKL_36745 1.0 LVM Cumulative Patch
# PHKL_36745.CORE2-KRN 1.0 OS-Core.CORE2-KRN applied
# PHKL_36745.LVM-KRN 1.0 LVM.LVM-KRN applied
# swlist -l patch | grep -i phne_37489
#
This shows that patch phkl_36745 is installed and phne_37489 is not installed yet
1. Back up your system before installing a patch.
2. Login as root.
3. Download and Copy the patch to the /tmp directory.
4. Move to the /tmp directory and unshar the patch:
cd /tmp
sh PHNE_37489
5. Run swinstall to install the patch:
swinstall -x autoreboot=true -x patch_match_target=true \
-s /tmp/PHNE_37489.depot
By default swinstall will archive the original software in
/var/adm/sw/save/PHNE_37489. If you do not wish to retain a
copy of the original software, include the patch_save_files
option in the swinstall command above:
-x patch_save_files=false
WARNING: If patch_save_files is false when a patch is installed,
the patch cannot be deinstalled. Please be careful
when using this feature.
For future reference, the contents of the PHNE_37489.text file is
available in the product readme:
swlist -l product -a readme -d @ /tmp/PHNE_37489.depot
To put this patch on a magnetic tape and install from the
tape drive, use the command:
dd if=/tmp/PHNE_37489.depot of=/dev/rmt/0m bs=2k
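Once the install completes, re-run the same swlist check from above to confirm the patch now shows up:
# swlist -l patch | grep -i phne_37489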
Friday, September 5, 2008
HPUX check network interface ip and status
# lanscan
Hardware Station Crd Hdw Net-Interface NM MAC HP-DLPI DLPI
Path Address In# State NamePPA ID Type Support Mjr#
0/1/2/0 0x00156004DF56 0 UP lan0 snap0 1 ETHER Yes 119
VLAN5001 0x00156004DF56 5001 UP lan5001 snap5001 7 ETHER Yes 119
VLAN5000 0x00156004DF56 5000 UP lan5000 snap5000 6 ETHER Yes 119
0/1/2/1 0x00156004DF57 1 UP lan1 snap1 2 ETHER Yes 119
0/5/1/0 0x00163584E02A 2 UP lan2 snap2 3 ETHER Yes 119
#
# ifconfig lan0
ifconfig: no such interface
# ifconfig lan2
ifconfig: no such interface
# netstat -i
Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
lan5001 1500 172.30.225.0 mama201-nb 2531362 0 18691869 0 0
lan5000 1500 142.133.20.0 mama201 1388313474 0 1440717679 0 0
lo0 4136 loopback localhost 30234682 0 30234690 0 0
# ifconfig lan5001
lan5001: flags=4000000000001843
inet 172.30.225.26 netmask ffffff00 broadcast 172.30.225.255
#
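The "no such interface" messages simply mean lan0 and lan2 have not been plumbed with an IP address. To check the link state of an unplumbed card by its PPA from the lanscan output (a sketch, not part of the original capture), lanadmin should report speed and duplex:
# lanadmin -x 2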
Sudoers Example: su - root
As root
#visudo
Add the lines below, where user123 is a user ID that needs su - privileges:
user123 ALL=(ALL) /usr/bin/su -
user124 ALL=(ALL) /usr/bin/su -
user125 ALL=(ALL) /usr/bin/su -
user126 ALL=(ALL) /usr/bin/su -
:wq!
as user123
user123% sudo su -
We trust you have received the usual lecture from the local System
Administrator. It usually boils down to these two things:
#1) Respect the privacy of others.
#2) Think before you type.
Password:
Authorized access only!
root#
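To double-check what sudo will actually allow, sudo -l lists the permitted commands; the output should look roughly like:
user123% sudo -l
User user123 may run the following commands on this host:
    (ALL) /usr/bin/su -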