Kamis, 09 November 2017

Problem encountered when installing OSC 4.3 SRU 8.4.0

root@clusternode1:/export/home/admin# pkg publisher
PUBLISHER TYPE STATUS P LOCATION
solaris origin online F file:///repo-full/repo/
solaris origin online F file:///repo-incr/
ha-cluster origin online F file:///osc-repo-full/repo/
ha-cluster origin online F file:///osc-repo-incr/repo/
root@clusternode1:/export/home/admin# pkg install ha-cluster-full
Packages to install: 92
Services to change: 10
Create boot environment: No
Create backup boot environment: Yes


pkg: The following packages require their licenses to be accepted before they can be installed or updated:
----------------------------------------
Package: pkg://solaris/developer/java/jdk-7@1.7.0.151.15,5.11:20170713T203336Z

License: BCL
License requires acceptance.

To indicate that you agree to and accept the terms of the licenses of the packages listed above, use the --accept option. To display all of the related licenses, use the --licenses option.

solution
-----------
root@clusternode1:/export/home/admin# pkg install --accept ha-cluster-full
Packages to install: 92
Services to change: 10
Create boot environment: No
Create backup boot environment: Yes

DOWNLOAD PKGS FILES XFER (MB) SPEED
Completed 92/92 7626/7626 314.4/314.4 0B/s

PHASE ITEMS
Installing new actions 10979/10979
Updating package state database Done
Updating package cache 0/0
Updating image state Done
Creating fast lookup database Done
Updating package cache 2/2
root@clusternode1:/export/home/admin#

How to install the Solaris Cluster framework 4.1

root@SMJKT-PRFND01:/mnt# mount -F hsfs sol-11_1-repo-full.iso /repo-full/
root@SMJKT-PRFND01:/mnt# mount -F hsfs sol-11_1_11_4_0-incr-repo.iso /repo-incr/
root@SMJKT-PRFND01:/mnt#

root@SMJKT-PRFND01:/mnt/SunCluster/progs/SunCluster/4.1# mount -F hsfs osc-4_1-ga-repo-full.iso /osc-repofull/

root@SMJKT-PRFND01:/mnt/SunCluster/progs/SunCluster/4.1# mount -F hsfs osc-4_1_2-1-repo-incr.iso /osc-repoincr/

root@SMJKT-PRFND01:/mnt/SunCluster/progs/SunCluster/4.1# pkg set-publisher -g file:///osc-repofull/repo ha-cluster
root@SMJKT-PRFND01:/mnt/SunCluster/progs/SunCluster/4.1# pkg set-publisher -g file:///osc-repoincr/repo ha-cluster


root@node1:/mnt/SunCluster/progs/SunCluster/4.1# pkg publisher
PUBLISHER TYPE STATUS P LOCATION
solaris origin online F file:///net/192.168.1.10/mnt/repo/
solaris origin online F file:///repo-full/repo/
solaris origin online F file:///repo-incr/repo/
ha-cluster origin online F file:///osc-repofull/repo/
ha-cluster origin online F file:///osc-repoincr/repo/


root@node1:/mnt# /usr/bin/pkg install ha-cluster-full
Packages to install: 73
Create boot environment: No
Create backup boot environment: Yes
Services to change: 7

DOWNLOAD PKGS FILES XFER (MB) SPEED
Completed 73/73 9089/9089 73.0/73.0 0B/s

PHASE ITEMS
Installing new actions 11941/11941
Updating package state database Done
Updating image state Done
Creating fast lookup database Done
root@node1:/mnt#

How to remove a metaset in Solaris Cluster 3.x

check disk group
# cldg status

check diskset
# metaset

Commands run from the 1st node
=================
remove diskset from 2nd node
remove DID disk from diskset
remove diskset from 1st node


example
=======
root@andromeda1 # metaset

Set name = datacopy, Set number = 3

Host Owner
andromeda1 Yes
andromeda2

Mediator Host(s) Aliases
andromeda1
andromeda2

Driv Dbase

d1 Yes

Set name = zonecl03, Set number = 4

Host Owner
andromeda1 Yes
andromeda2

Driv Dbase

d7 Yes

root@andromeda1 # metaset -s zonecl03 -d -h andromeda2
root@andromeda1 # metaset -s zonecl03 -f -d /dev/did/rdsk/d7
root@andromeda1 # metaset -s zonecl03 -d -h andromeda1
root@andromeda1 # metaset

root@andromeda1 # metaset -s datacopy -t

Set name = datacopy, Set number = 3

Host Owner
andromeda1 Yes
andromeda2

Mediator Host(s) Aliases
andromeda1
andromeda2

Driv Dbase

d1 Yes
root@andromeda1 #

How to create a mirrored metadevice in Solaris Cluster 3.x

create metaset
# metaset -s diskset -a -h node1
# metaset -s diskset -a -h node2

add a DID device to the diskset
# metaset -s <diskset> -a /dev/did/rdsk/d<N>

create the metadevices (one concat/stripe per disk, then the mirror)
# metainit -s <diskset> d<N1> 1 1 /dev/did/rdsk/<first-DID>s0
# metainit -s <diskset> d<N2> 1 1 /dev/did/rdsk/<second-DID>s0
# metainit -s <diskset> d<mirror> -m d<N1>

e.g. # metainit -s hartis-ds d10 1 1 /dev/did/rdsk/d1s0

example

root@andromeda1 # metaset

Set name = datacopy, Set number = 3

Host Owner
andromeda1 Yes
andromeda2

Mediator Host(s) Aliases
andromeda1
andromeda2

Driv Dbase

d1 Yes

root@andromeda1 # metaset -s datacopy -a /dev/did/rdsk/d7
root@andromeda1 #

root@andromeda1 # metaset

Set name = datacopy, Set number = 3

Host Owner
andromeda1 Yes
andromeda2

Mediator Host(s) Aliases
andromeda1
andromeda2

Driv Dbase

d1 Yes

d7 Yes

root@andromeda1 # metainit -s datacopy d100 1 1 /dev/did/rdsk/d1s0
datacopy/d100: Concat/Stripe is setup
root@andromeda1 #

root@andromeda1 # metainit -s datacopy d200 1 1 /dev/did/rdsk/d7s0
datacopy/d200: Concat/Stripe is setup

root@andromeda1 # metainit -s datacopy d10 -m d100
datacopy/d10: Mirror is setup

root@andromeda1 # metastat -s datacopy
datacopy/d10: Mirror
Submirror 0: datacopy/d100
State: Okay
Pass: 1
Read option: roundrobin (default)
Write option: parallel (default)
Size: 104808448 blocks (49 GB)

datacopy/d100: Submirror of datacopy/d10
State: Okay
Size: 104808448 blocks (49 GB)
Stripe 0:
Device Start Block Dbase State Reloc Hot Spare
d1s0 0 No Okay Yes


datacopy/d200: Concat/Stripe
Size: 104808448 blocks (49 GB)
Stripe 0:
Device Start Block Dbase Reloc
d7s0 0 No Yes

Device Relocation Information:
Device Reloc Device ID
d7 Yes id1,did@n600508b4000138cb0003200000080000
d1 Yes id1,did@n600508b4000138cb00032000000f0000


root@andromeda1 # metastat -s datacopy -p
datacopy/d10 -m datacopy/d100 1
datacopy/d100 1 1 /dev/did/rdsk/d1s0
datacopy/d200 1 1 /dev/did/rdsk/d7s0
root@andromeda1 #

root@andromeda1 # metastat -s datacopy -c
datacopy/d10 m 49GB datacopy/d100
datacopy/d100 s 49GB d1s0
datacopy/d200 s 49GB d7s0


root@andromeda1 # mount /dev/md/datacopy/dsk/d10 /mnt
root@andromeda1 #

root@andromeda1 # metattach -s datacopy d10 d200
datacopy/d10: submirror datacopy/d200 is attached
root@andromeda1 # metastat -s datacopy
datacopy/d10: Mirror
Submirror 0: datacopy/d100
State: Okay
Submirror 1: datacopy/d200
State: Resyncing
Resync in progress: 0 % done
Pass: 1
Read option: roundrobin (default)
Write option: parallel (default)
Size: 104808448 blocks (49 GB)

datacopy/d100: Submirror of datacopy/d10
State: Okay
Size: 104808448 blocks (49 GB)
Stripe 0:
Device Start Block Dbase State Reloc Hot Spare
d1s0 0 No Okay Yes


datacopy/d200: Submirror of datacopy/d10
State: Resyncing
Size: 104808448 blocks (49 GB)
Stripe 0:
Device Start Block Dbase State Reloc Hot Spare
d7s0 0 No Okay Yes


Device Relocation Information:
Device Reloc Device ID
d7 Yes id1,did@n600508b4000138cb0003200000080000
d1 Yes id1,did@n600508b4000138cb00032000000f0000

root@andromeda1 # metastat -s datacopy -c
datacopy/d10 m 49GB datacopy/d100 datacopy/d200 (resync-0%)
datacopy/d100 s 49GB d1s0
datacopy/d200 s 49GB d7s0
root@andromeda1 # metastat -s datacopy -p
datacopy/d10 -m datacopy/d100 datacopy/d200 1
datacopy/d100 1 1 /dev/did/rdsk/d1s0
datacopy/d200 1 1 /dev/did/rdsk/d7s0
root@andromeda1 #

How to purge a metaset in Solaris Cluster 3.x

Check the metadevices
on one of the nodes:
# metastat -s <setname> -p
# metastat -s <setname> -c

Clear the metadevices
on one of the nodes:
# metaclear -s <setname> d<N>
Clear the submirrors first, then clear the mirror

Remove the DID devices from the metaset
on one of the nodes:
# metaset -s <setname> -d /dev/did/rdsk/d<N>

Remove the metaset host entries
on node1:
# metaset -s <setname> -d -h node1

On node2:
# metaset -s <setname> -d -h node2

If the steps above fail or an error prevents removal, use
# metaset -s <setname> -P -f

If that still does not work, run on each node:
# metaset -s <setname> -C purge


If it still cannot be removed, use DTK (this exists only in Solaris Cluster 3.x; there is no DTK in version 4.x)
# /usr/cluster/dtk/bin/dcs_config -c remove -s <setname>
note: install DTK first

Reference:
Solaris Cluster 3.3 Diagnostic Toolkit aka. DTK, SUNWscdtk (Doc ID 1270804.1)

How to rename a cluster node in Solaris Cluster 3.3

1. copy the infrastructure file in the directory /etc/cluster/ccr/global/
# cp -p /etc/cluster/ccr/global/infrastructure /etc/cluster/ccr/global/infrastructure.old
2. boot in non cluster mode
# init 0
ok boot -x
3. edit the file /etc/cluster/ccr/global/infrastructure, updating the node names in the entries below
cluster.nodes.1.name
cluster.nodes.2.name
4. Regenerate the checksum of the infrastructure file by running:
/usr/cluster/lib/sc/ccradm recover -o infrastructure
5. reboot the server
/usr/sbin/reboot

run it in all node

================================
below is an example of the infrastructure file

root@gepci # cat /etc/cluster/ccr/global/infrastructure
ccr_gennum 7
ccr_checksum 0BA5C44857F71EE0E32175BCCDB97DE6
cluster.name gecluster
cluster.state enabled
cluster.properties.cluster_id 0x4EBC78CC
cluster.properties.installmode disabled
cluster.properties.private_net_number 172.16.0.0
cluster.properties.cluster_netmask 255.255.240.0
cluster.properties.private_netmask 255.255.248.0
cluster.properties.private_subnet_netmask 255.255.255.128
cluster.properties.private_user_net_number 172.16.4.0
cluster.properties.private_user_netmask 255.255.254.0
cluster.properties.private_maxnodes 64
cluster.properties.private_maxprivnets 10
cluster.properties.zoneclusters 12
cluster.properties.auth_joinlist_type sys
cluster.properties.auth_joinlist_hostslist .
cluster.properties.transport_heartbeat_timeout 10000
cluster.properties.transport_heartbeat_quantum 1000
cluster.properties.udp_session_timeout 480
cluster.properties.cmm_version 1
cluster.nodes.1.name gepci
cluster.nodes.1.state enabled
cluster.nodes.1.properties.private_hostname clusternode1-priv
cluster.nodes.1.properties.quorum_vote 1
cluster.nodes.1.properties.quorum_resv_key 0x4EBC78CC00000001
cluster.nodes.1.adapters.1.name vnet2
cluster.nodes.1.adapters.1.state enabled
cluster.nodes.1.adapters.1.properties.device_name vnet
cluster.nodes.1.adapters.1.properties.device_instance 2
cluster.nodes.1.adapters.1.properties.transport_type dlpi
cluster.nodes.1.adapters.1.properties.lazy_free 1
cluster.nodes.1.adapters.1.properties.dlpi_heartbeat_timeout 10000
cluster.nodes.1.adapters.1.properties.dlpi_heartbeat_quantum 1000
cluster.nodes.1.adapters.1.properties.nw_bandwidth 80
cluster.nodes.1.adapters.1.properties.bandwidth 70
cluster.nodes.1.adapters.1.properties.ip_address 172.16.0.129
cluster.nodes.1.adapters.1.properties.netmask 255.255.255.128
cluster.nodes.1.adapters.1.ports.1.name 0
cluster.nodes.1.adapters.1.ports.1.state enabled
cluster.nodes.1.adapters.2.name vnet3
cluster.nodes.1.adapters.2.state enabled
cluster.nodes.1.adapters.2.properties.device_name vnet
cluster.nodes.1.adapters.2.properties.device_instance 3
cluster.nodes.1.adapters.2.properties.transport_type dlpi
cluster.nodes.1.adapters.2.properties.lazy_free 1
cluster.nodes.1.adapters.2.properties.dlpi_heartbeat_timeout 10000
cluster.nodes.1.adapters.2.properties.dlpi_heartbeat_quantum 1000
cluster.nodes.1.adapters.2.properties.nw_bandwidth 80
cluster.nodes.1.adapters.2.properties.bandwidth 70
cluster.nodes.1.adapters.2.properties.ip_address 172.16.1.1
cluster.nodes.1.adapters.2.properties.netmask 255.255.255.128
cluster.nodes.1.adapters.2.ports.1.name 0
cluster.nodes.1.adapters.2.ports.1.state enabled
cluster.nodes.1.cmm_version 1
cluster.nodes.2.name gepdb
cluster.nodes.2.state enabled
cluster.nodes.2.properties.quorum_vote 1
cluster.nodes.2.properties.quorum_resv_key 0x4EBC78CC00000002
cluster.nodes.2.properties.private_hostname clusternode2-priv
cluster.nodes.2.adapters.1.name vnet2
cluster.nodes.2.adapters.1.properties.device_name vnet
cluster.nodes.2.adapters.1.properties.device_instance 2
cluster.nodes.2.adapters.1.properties.transport_type dlpi
cluster.nodes.2.adapters.1.properties.lazy_free 1
cluster.nodes.2.adapters.1.properties.dlpi_heartbeat_timeout 10000
cluster.nodes.2.adapters.1.properties.dlpi_heartbeat_quantum 1000
cluster.nodes.2.adapters.1.properties.nw_bandwidth 80
cluster.nodes.2.adapters.1.properties.bandwidth 70
cluster.nodes.2.adapters.1.properties.ip_address 172.16.0.130
cluster.nodes.2.adapters.1.properties.netmask 255.255.255.128
cluster.nodes.2.adapters.1.state enabled
cluster.nodes.2.adapters.1.ports.1.name 0
cluster.nodes.2.adapters.1.ports.1.state enabled
cluster.nodes.2.adapters.2.name vnet3
cluster.nodes.2.adapters.2.properties.device_name vnet
cluster.nodes.2.adapters.2.properties.device_instance 3
cluster.nodes.2.adapters.2.properties.transport_type dlpi
cluster.nodes.2.adapters.2.properties.lazy_free 1
cluster.nodes.2.adapters.2.properties.dlpi_heartbeat_timeout 10000
cluster.nodes.2.adapters.2.properties.dlpi_heartbeat_quantum 1000
cluster.nodes.2.adapters.2.properties.nw_bandwidth 80
cluster.nodes.2.adapters.2.properties.bandwidth 70
cluster.nodes.2.adapters.2.properties.ip_address 172.16.1.2
cluster.nodes.2.adapters.2.properties.netmask 255.255.255.128
cluster.nodes.2.adapters.2.state enabled
cluster.nodes.2.adapters.2.ports.1.name 0
cluster.nodes.2.adapters.2.ports.1.state enabled
cluster.nodes.2.cmm_version 1
cluster.blackboxes.1.name switch1
cluster.blackboxes.1.state enabled
cluster.blackboxes.1.properties.type switch
cluster.blackboxes.1.ports.1.name 1
cluster.blackboxes.1.ports.1.state enabled
cluster.blackboxes.1.ports.2.name 2
cluster.blackboxes.1.ports.2.state enabled
cluster.blackboxes.2.name switch2
cluster.blackboxes.2.state enabled
cluster.blackboxes.2.properties.type switch
cluster.blackboxes.2.ports.1.name 1
cluster.blackboxes.2.ports.1.state enabled
cluster.blackboxes.2.ports.2.name 2
cluster.blackboxes.2.ports.2.state enabled
cluster.cables.1.properties.end1 cluster.nodes.1.adapters.1.ports.1
cluster.cables.1.properties.end2 cluster.blackboxes.1.ports.1
cluster.cables.1.state enabled
cluster.cables.2.properties.end1 cluster.nodes.1.adapters.2.ports.1
cluster.cables.2.properties.end2 cluster.blackboxes.2.ports.1
cluster.cables.2.state enabled
cluster.cables.3.properties.end1 cluster.nodes.2.adapters.1.ports.1
cluster.cables.3.properties.end2 cluster.blackboxes.1.ports.2
cluster.cables.3.state enabled
cluster.cables.4.properties.end1 cluster.nodes.2.adapters.2.ports.1
cluster.cables.4.properties.end2 cluster.blackboxes.2.ports.2
cluster.cables.4.state enabled
cluster.quorum_devices.2.name d4
cluster.quorum_devices.2.state enabled
cluster.quorum_devices.2.properties.votecount 1
cluster.quorum_devices.2.properties.gdevname /dev/did/rdsk/d4s2
cluster.quorum_devices.2.properties.path_1 enabled
cluster.quorum_devices.2.properties.path_2 enabled
cluster.quorum_devices.2.properties.access_mode scsi2
cluster.quorum_devices.2.properties.type shared_disk

Reference:
Doc ID 1018806.1 (modified)

How to back up and restore rpool (ZFS) on Solaris 10

On the source server
1. zfs list
2. zfs snapshot -r rpool@snap1
3. zfs send -Rv rpool@snap1 > /tmp/rpool.snap1
4. share -F nfs -o ro /tmp

On the destination server
1. boot cdrom -s
2. create rpool
zpool create -f -o failmode=continue -R /a -m legacy -o cachefile=/etc/zfs/zpool.cache rpool c0d1s0
3. mount the NFS share from the source
mount -F nfs 192.168.1.10:/tmp /mnt
4. zfs receive
cat /mnt/rpool.snap1 | zfs receive -Fdu rpool
5. set boot-fs
zfs list
zpool set bootfs=rpool/ROOT/s10s_u11wos_24a rpool
6. Install the bootblk
installboot -F zfs /usr/platform/`uname -i`/lib/fs/zfs/bootblk /dev/rdsk/c0d1s0
7. restart
# init 6