Tuesday, February 26, 2019

Udev rules for Oracle


cd /etc/udev/rules.d


vi 96-dm-oracle-permissions.rules

##### ADD ONE LINE PER DISK ######

ENV{DM_NAME}=="mpdata01", OWNER:="oracle", GROUP:="oinstall", MODE:="660"
ENV{DM_NAME}=="mpdata01p1", OWNER:="oracle", GROUP:="oinstall", MODE:="660"

ENV{DM_NAME}=="mpdata02", OWNER:="oracle", GROUP:="oinstall", MODE:="660"
ENV{DM_NAME}=="mpdata02p1", OWNER:="oracle", GROUP:="oinstall", MODE:="660"

#################################################

Then run:

[root@linux]# udevadm control --reload-rules
[root@linux]# udevadm trigger --type=devices --action=change

This shows the device properties and can help:
udevadm info --query=property --name /dev/xxxxxx
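A quick way to confirm the rules were applied (device names follow the example above) is to check the ownership of the mapper nodes, for instance:

# ls -lL /dev/mapper/mpdata01 /dev/mapper/mpdata01p1     # should show oracle:oinstall and mode 660
# udevadm info --query=property --name /dev/mapper/mpdata01 | grep DM_NAME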

Monday, February 25, 2019

LVM (quick notes)


#pvcreate /dev/mapper/mp_u00p1
#vgcreate vg_u00 /dev/mapper/mp_u00p1
#lvcreate -n lv_u00 -l 100%FREE vg_u00
#mkdir /u00
#mkfs.ext4 /dev/vg_u00/lv_u00
#tune2fs -c -1 /dev/vg_u00/lv_u00
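
To actually use the new filesystem it still needs an fstab entry and a mount; a minimal sketch (the options below are illustrative):

#echo '/dev/vg_u00/lv_u00  /u00  ext4  defaults  1 2' >> /etc/fstab
#mount /u00
#df -h /u00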

RESIZE FS  
#RHEL 5 and 6
#lvresize -L +8G /dev/<volumegroup>/<logicalvolume>
#resize2fs -p /dev/<volumegroup>/<logicalvolume>
(without a size argument, resize2fs grows the filesystem to fill the logical volume)

#RHEL4
#ext2online /dev/<volumegroup>/<logicalvolume>
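
Before growing, it helps to confirm the VG has free extents, and afterwards that the filesystem actually grew (a quick check, not part of the original note):

#vgs <volumegroup>        # the VFree column shows the space available
#df -h <mountpoint>       # run after the resize to confirm the new size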

Thursday, February 21, 2019

Access Control uninstall failure


If, when trying to uninstall Access Control, it throws an error like this:

[root@app4 lbin]# /opt/CA/eTrustAccessControl/lbin/Uninstall
ERROR: Another CA ControlMinder process is currently running.
Wait until the process completes and run uninstallation again,
or remove /tmp/pre_install.
[root@app4 lbin]#

The fix is to delete /tmp/pre_install:

[root@app4 lbin]# rm -fr /tmp/pre_install
[root@app4 lbin]# /opt/CA/eTrustAccessControl/lbin/Uninstall
Removing CA ControlMinder service from /etc/services
Removing CA ControlMinder system call loader.
Trying to remove references to CA ControlMinder from the system init files.
Removing CA ControlMinder PAM module from your system.
Restoring /etc/pam.d/login by removing last changes made to file
Removing CA ControlMinder Admin tools binaries and directories.
Removing CA ControlMinder Admin tools X-resource files.
The CA ControlMinder Admin tools files have now
been removed from your system.
Removing seos.ini from /etc
The token global.AC_Version, now set to 'none', (was '12.xx.0.2919')
Removing ControlMinder from AccessControl Shared dependency list
Removing AccessControl Shared
Removing CAPKI.
Removing CA ControlMinder binaries and directories.

UnInstall complete.

The CA ControlMinder files have now been removed.

Warning: If you have replaced /bin/passwd with sepass,
         You must restore the original /bin/passwd.

         You may edit your /etc/passwd and /etc/group
         files, in order to remove unnecessary users.

[root@app4 lbin]#

Upgrading the OS version from Satellite


subscription-manager register --org="miOrga" --activationkey="ak_miOrga"
subscription-manager release --set=6.9
subscription-manager release --show
yum --releasever=6.9 update
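
If you are not sure which minor releases are available to the subscription, you can list them first (assumes the system is already registered):

subscription-manager release --list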

Wednesday, February 20, 2019

Add a VIP to a cluster (RHEL 6.6)


Edit the /etc/cluster/cluster.conf file and add the IP as a service and as a resource:

[root@cluster01]# vim cluster.conf
<resources>
      <ip address="10.5.254.3" monitor_link="1"/>
</resources>

<service autostart="1" domain="fd-mbdvdes" name="svc-mbd" recovery="relocate">
              <ip ref="10.5.254.3"/>
</service>

Check the current version of the cluster configuration file with the following command:

[root@cluster01]# cman_tool version
6.2.0 config 24
[root@cluster01]#

Now, using that value, edit the cluster configuration file (on both cluster nodes) and bump the version on the first line (add 1 to the current value; in this case it will be 25):

[root@cluster01]# vim cluster.conf
<?xml version="1.0"?>
<cluster config_version="25" name="cl-cluster">

Now replicate the configuration with the following command (run it on only one of the nodes):

cman_tool version -r -S

Now check whether the cluster VIP came up:

[root@cluster01]# ip ad sh |grep glo |grep bond0
    inet 10.5.252.2/22 brd 10.5.255.255 scope global bond0.150
    inet 10.5.254.3/22 scope global secondary bond0.150
[root@cluster01]#
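
As a final sanity check (assuming rgmanager is managing the service), clustat should show svc-mbd running on one of the nodes:

[root@cluster01]# clustat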

Tuesday, February 19, 2019

Enable core dumps (crash dumps)


cat /etc/kdump.conf |grep -v "#"
path /var/crash
core_collector  makedumpfile -c --message-level 1 -d 31
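
kdump also needs crashkernel memory reserved at boot; a quick way to check that (not part of the original note):

# grep -o 'crashkernel=[^ ]*' /proc/cmdline
# service kdump status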

[root@prod5 ]# service kdump restart
Stopping kdump:                                            [  OK  ]
Detected change(s) the following file(s):

  /etc/kdump.conf
Rebuilding /boot/initrd-2.6.32-358.el6.x86_64kdump.img
No module seos found for kernel 2.6.32-358.el6.x86_64, aborting.
Failed to run mkdumprd
Starting kdump:                                            [FAILED]
Solution
secons -sk
SEOS_load -u
and retry.
If that does not work, boot it without AC:
chkconfig seos off
reboot
service kdump restart

Test kdump
#To test kdump, type the following commands at a shell prompt:
#echo 1 > /proc/sys/kernel/sysrq
#echo c > /proc/sysrq-trigger
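
Keep in mind the sysrq-trigger line crashes the box immediately; after it reboots, the vmcore should land under the path configured above. A quick check:

# ls -l /var/crash/     # a timestamped directory containing a vmcore should appear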

Remove EMC PowerPath **requires a server reboot**


Check the current PowerPath device layout and then the physical volumes (the listing below comes from powermt, not from dm-multipath):
# powermt display dev=all
Pseudo name=emcpowera
Symmetrix ID=000295700059
Logical device ID=1D4E
state=alive; policy=BasicFailover; queued-IOs=0
==============================================================================
--------------- Host ---------------   - Stor -  -- I/O Path --   -- Stats ---
###  HW Path               I/O Paths    Interf.  Mode     State   Q-IOs Errors
==============================================================================
   6 lpfc                   sdu         FA  6eB  unlic    alive      0      0
   4 lpfc                   sdk         FA 11eA  active   alive      0      0

Pseudo name=emcpowerb
Symmetrix ID=000295700059
Logical device ID=1D4D
state=alive; policy=BasicFailover; queued-IOs=0
==============================================================================
--------------- Host ---------------   - Stor -  -- I/O Path --   -- Stats ---
###  HW Path               I/O Paths    Interf.  Mode     State   Q-IOs Errors
==============================================================================
   6 lpfc                   sdt         FA  6eB  unlic    alive      0      0
   4 lpfc                   sdj         FA 11eA  active   alive      0      0

Pseudo name=emcpowerc
Symmetrix ID=000295700059
Logical device ID=1D4C
state=alive; policy=BasicFailover; queued-IOs=0
==============================================================================
--------------- Host ---------------   - Stor -  -- I/O Path --   -- Stats ---
###  HW Path               I/O Paths    Interf.  Mode     State   Q-IOs Errors
==============================================================================
   6 lpfc                   sds         FA  6eB  unlic    alive      0      0
   4 lpfc                   sdi         FA 11eA  active   alive      0      0

(output for the remaining devices truncated)

#

Check the current physical volumes:

# pvs
  PV              VG           Fmt  Attr PSize   PFree
  /dev/emcpoweri1 vg_u01       lvm2 a--   60.00g      0
  /dev/sda2       vg_omidbsrv2 lvm2 a--  278.88g 224.07g
#

Now for the magic:

# /etc/init.d/PowerPath stop
Stopping PowerPath: Following LVM volume groups are in use:
      Volume Group: vg_u01 (/dev/emcpoweri1)

These open logical volume devices use LUNs from PowerPath managed devices,
Please reissue the command after closing these volumes.
 failed

# mv /etc/init.d/PowerPath   /root/PowerPath
# 
# reboot -f


After the reboot, run:

# powermt display dev=all
ERROR: Cannot open PowerPath. Initialization error

# pvs
  Found duplicate PV BKQDRy80KHl3e8ZDuJM4rws21cgjTlU0: using /dev/sdc1 not /dev/sdq1
  PV         VG           Fmt  Attr PSize   PFree  
  /dev/sda2  vg_omidbsrv2 lvm2 a--  278.88g 224.07g
  /dev/sdc1  vg_u01       lvm2 a--   60.00g      0 

# rpm -qa |grep -i emc
EMCpower.LINUX-5.7.1.00.00-029.el6.x86_64
# rpm -e EMCpower.LINUX-5.7.1.00.00-029.el6.x86_64

# yum install device-mapper-multipath

# cd /
# cp ./usr/share/doc/device-mapper-multipath-0.4.9/multipath.conf /etc/multipath.conf

# service multipathd start
Starting multipathd daemon: [  OK  ]
# multipath -ll

# chkconfig multipathd on
# 
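
If friendlier names are preferred over the wwid-based ones, a small /etc/multipath.conf tweak like the following is common (just a sketch, not part of the original procedure):

defaults {
        user_friendly_names yes
}

# service multipathd restart
# multipath -ll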

Tuesday, February 12, 2019

Upgrade the Operating System Version


On the stlinux1 host, run:

stlinux1 ~]# cd /SOFT/repos
stlinux1 repos]# ls -ltr
total 4
drwxr-xr-x 3 root root   21 Dec 14 15:29 7.3
drwxr-xr-x 3 root root   21 Dec 14 15:31 6.3
drwxr-xr-x 3 root root   21 Dec 14 15:32 5.8
drwxr-xr-x 3 root root   21 Dec 14 15:33 5.4
drwxr-xr-x 3 root root   21 Dec 14 15:34 6.8
drwxr-xr-x 3 root root   21 Dec 14 15:36 6.6
-rwxr--r-- 1 root root 3159 Dec 17 16:22 crea_repo_con_menu.sh
drwxr-xr-x 3 root root   21 Dec 19 10:58 7.6
drwxr-xr-x 3 root root   21 Dec 19 11:17 7.4
drwxr-xr-x 3 root root   21 Jan  9 03:45 6.4
drwxr-xr-x 3 root root   21 Feb  4 09:48 6.9
stlinux1 repos]# 

Run sh crea_repo_con_menu.sh and select the version we want to upgrade the OS to:

sh crea_repo_con_menu.sh


It generates the following output, from which I will take the data needed later to configure the client being upgraded.


The selected iso is : rhel-server-6.9-x86_64-dvd.iso
Release : 6.9
Arch : x86_64
Releasever : 6Server

Checking/updating the repo locally
The repo already exists
/SOFT/repos/6.9/6Server/x86_64

###################################################################
##### Create /etc/yum.repos.d/stlinux.repo on the client with the #####
##### following content:                                           #####
###################################################################

[stlinux-Server]
name=Red Hat Enterprise Linux $releasever - $basearch - stlinux
baseurl=http://10.1.3.37/6.9/$releasever/$basearch/Server/
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release


On the client to be upgraded I have to run:
subscription-manager clean
then copy and paste from [stlinux-Server] through the gpgkey line (inclusive) into the file /etc/yum.repos.d/stlinux.repo:


[stlinux-Server]
name=Red Hat Enterprise Linux $releasever - $basearch - stlinux
baseurl=http://10.1.3.37/6.9/$releasever/$basearch/Server/
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
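
Before launching the update it is worth confirming that yum actually sees the new repo (a quick check, not part of the original steps):

yum repolist        # stlinux-Server should show up as enabled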


yum clean all
yum update      # answer Yes when prompted
then reboot

Monday, February 11, 2019

rpm hung


The symptom is that rpm -qa hangs. The way to fix it was to kill the processes associated with rpm and rebuild the rpm database, deleting it first.

# ps -ef | grep rpm
root      7729  5539  0 15:45 ?        00:00:00 /bin/sh /etc/cron.daily/rpm
root      7730  5539  0 15:45 ?        00:00:00 awk -v progname=/etc/cron.daily/rpm progname {?????   print progname ":\n"?????   progname="";????       }????       { print; }
root      7733  7729  0 15:45 ?        00:00:00 /usr/lib/rpm/rpmq -q --all --qf %{name}-%{version}-%{release}.%{arch}.rpm\n
root      7737  7461  0 15:45 pts/2    00:00:00 /usr/lib/rpm/rpmq -q --all
root      7882  7601  0 15:47 pts/7    00:00:00 grep rpm
# kill -9 7729 7730 7733 7737
#  ps -ef | grep rpm         
root      7963  7601  0 15:48 pts/7    00:00:00 grep rpm
# rm /var/lib/rpm/__db*
rm: remove regular file `/var/lib/rpm/__db.001'? y
rm: remove regular file `/var/lib/rpm/__db.002'? y
rm: remove regular file `/var/lib/rpm/__db.003'? y
# rpm --rebuilddb
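
A quick check that the rebuilt database responds again (not part of the original note):

# rpm -qa | wc -l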

Thursday, February 7, 2019

Network error, unable to connect to server. Please see /var/log/rhsm/rhsm.log for more information.


When trying to register a host with the Satellite, one that had not been registered before, it threw the error mentioned in the title of this post.

# subscription-manager register --org="Orga" --activationkey="ak_Orga"
Network error, unable to connect to server. Please see /var/log/rhsm/rhsm.log for more information.

The fix was to install the Satellite's katello-ca-consumer certificate RPM and register again:

# curl -O https://sat1.dominio.com.ar/pub/katello-ca-consumer-latest.noarch.rpm --insecure
# rpm -ivh katello-ca-consumer-latest.noarch.rpm
Preparing...                ########################################### [100%]
   1:katello-ca-consumer-plr########################################### [100%]
#
# subscription-manager register --org="Orga" --activationkey="ak_Orga"
The system has been registered with ID: 5c29ba6f-83a6-4a9e-aa18-62c60xaafe71
Installed Product Current Status:
Product Name: Red Hat Enterprise Linux Server
Status:       Subscribed

# yum clean all
Loaded plugins: product-id, security, subscription-manager
Cleaning repos: InstallMedia rhel-6-server-optional-rpms rhel-6-server-rpms
Cleaning up Everything
# yum install net-snmp net-snmp-utils


Error: Can't open /dev/mapper/mp_u01 exclusively. Mounted filesystem?


This error came up while trying to add or remove a disk.

The fix was to delete the stale mapping with dmsetup (it was keeping the device open) and re-create the physical volume, as shown below:

# pvcreate /dev/mapper/mp_u01
  Can't open /dev/mapper/mp_u01 exclusively.  Mounted filesystem?

# dmsetup info -C
Name               Maj Min Stat Open Targ Event  UUID
mpathak            253  70 L--w    0    1      1 mpath-36000144000000010f0058d828093e679
mpdata07           253  42 L--w    1    1      1 mpath-360000970000292603946533030334646
mpdata09p1         253  38 L--w  105    1      0 part1-mpath-360000970000292603946533030334533
mpfra_01           253  47 L--w    1    1      1 mpath-360000970000292603946533030343033
mpathap2           253   3 L--w    3    1      0 part2-mpath-3600508b1001c33eca8bffe940a55d328
mpathaj            253  64 L--w    0    1      1 mpath-36000144000000010f0058d828093e65f
VolGroup00-lv_swap 253  52 L--w    1    1      0 LVM-e16sRDsZfUWb1Tet4ezit8hAzJtMQjrVjOrQlG97MssYdlXrPzHCcqQJlutRiWcM
mpdata06           253  32 L--w    1    1      1 mpath-360000970000292603946533030334537
VG_app-lv_app      253  56 L--w    1    1      0 LVM-2kHoNZKFTh4zbnOZvsMRd9wuhyrcLdMebmg73Aut8Zw6molkBjC6vL99MU6aToGs
VolGroup00-lv_root 253  51 L--w    1    1      0 LVM-e16sRDsZfUWb1Tet4ezit8hAzJtMQjrVsQKPdVwFz4U5UKamzS7Zzz8DU1cIA0Si
mpocr03            253  16 L--w    1    1      1 mpath-360000970000292603946533030344143
mpathap1           253   2 L--w    1    1      0 part1-mpath-3600508b1001c33eca8bffe940a55d328
mpu00p1            253  14 L--w    1    1      0 part1-mpath-360000970000292603946533030343244
mpathai            253  63 L--w    0    1      1 mpath-36000144000000010f0058d828093e655
mpdata05           253  33 L--w    1    1      3 mpath-360000970000292603946533030334542
mpathc             253   4 L--r    0    1      1 mpath-360000970000292603946533030303530
mpocr02            253  22 L--w    1    1      1 mpath-360000970000292603946533030344144
mpocr01p1          253  19 L--w    8    1      0 part1-mpath-360000970000292603946533030344145
mpu01p1            253  11 L--w    1    1      0 part1-mpath-360000970000292603946533030343245
mpocr02p1          253  28 L--w    4    1      0 part1-mpath-360000970000292603946533030344144
mpathah            253  61 L--w    0    1      1 mpath-36000144000000010f0058d828093e650
mpdata04           253  35 L--w    1    1      1 mpath-360000970000292603946533030334546
mpathb             253   1 L--w    0    1      1 mpath-3600508b1001cdc8279ddb944060d80dd
mpocr01            253  12 L--w    1    1      3 mpath-360000970000292603946533030344145
mpocr03p1          253  23 L--w    5    1      0 part1-mpath-360000970000292603946533030344143
VG_u01-lv_u01      253  54 L--w    1    1      0 LVM-TCKdgDIRBA0gtx8HpGXam20xk0p97uwCQu7qLu7R077NgxmwymnY6Y3l8t2aWIE0
mpathag            253  62 L--w    0    1      1 mpath-36000144000000010f0058d828093e65a
mpdata03           253  36 L--w    1    1      1 mpath-360000970000292603946533030334633
mpatha             253   0 L--w    2    1      0 mpath-3600508b1001c33eca8bffe940a55d328
mpdata10p1         253  37 L--w  106    1      0 part1-mpath-360000970000292603946533030334446
mp_logsp1          253  68 L--w    1    1      0 part1-mpath-36000144000000010f0058d828093e649
mpathaf            253  65 L--w    0    1      1 mpath-36000144000000010f0058d828093e664
mpdata02           253  39 L--w    1    1      1 mpath-360000970000292603946533030334637
mpdata11p1         253  34 L--w  107    1      0 part1-mpath-360000970000292603946533030334442
mpappsp1           253   9 L--w    1    1      0 part1-mpath-360000970000292603946533030343231
mpdata12p1         253  31 L--w  108    1      0 part1-mpath-360000970000292603946533030334437
mpdata01           253  45 L--w    1    1      1 mpath-360000970000292603946533030334642
VG_logs-pvmove0    253  73 L--w    1    1      0 LVM-rnGCrs5MGQMsHW6Ec9PWN0L9sYQ1ceKwSnRphlJfL8GCuiwYOEPmhavL24wLeoad
mpdata13p1         253  25 L--w  105    1      0 part1-mpath-360000970000292603946533030334433
mpdata14p1         253  27 L--w  105    1      0 part1-mpath-360000970000292603946533030334346
mpdata15           253  13 L--w    1    1      1 mpath-360000970000292603946533030334342
mp_logs            253  60 L--w    1    1      1 mpath-36000144000000010f0058d828093e649
mpdata15p1         253  20 L--w  107    1      0 part1-mpath-360000970000292603946533030334342
mpathac            253  59 L--w    0    1      1 mpath-36000144000000010f0058d828093e644
mpdata14           253  18 L--w    1    1      3 mpath-360000970000292603946533030334346
mpdata01p1         253  49 L--w  108    1      0 part1-mpath-360000970000292603946533030334642
mpathao            253  71 L--w    0    1      1 mpath-36000144000000010f0058d828093e67e
mpdata02p1         253  46 L--w  107    1      0 part1-mpath-360000970000292603946533030334637
mpfra_01p1         253  50 L--w   17    1      0 part1-mpath-360000970000292603946533030343033
mpdata13           253  17 L--w    1    1      1 mpath-360000970000292603946533030334433
mp_u01             253  58 L--w    1    1      1 mpath-36000144000000010f0058d828093e63f
mplogs             253   6 L--w    1    1      1 mpath-360000970000292603946533030343236
mpdata03p1         253  44 L--w  111    1      0 part1-mpath-360000970000292603946533030334633
mpathan            253  66 L--w    0    1      1 mpath-36000144000000010f0058d828093e669
mpdata04p1         253  43 L--w  107    1      0 part1-mpath-360000970000292603946533030334546
mpdata12           253  24 L--w    1    1      1 mpath-360000970000292603946533030334437
mp_u00             253  72 L--w    1    1      1 mpath-36000144000000010f0058d828093e63a
VG_logs-lv_logs    253  55 L--w    1    1      0 LVM-rnGCrs5MGQMsHW6Ec9PWN0L9sYQ1ceKweTWBIajB0zDGXuD0BUNiDdN2GYbRaKnX
VolGroup00-lv_home 253  57 L--w    1    1      0 LVM-e16sRDsZfUWb1Tet4ezit8hAzJtMQjrVjVNtDyy4HR2PDYKdq8XWD3He0DACFTe5
mpdata05p1         253  41 L--w  110    1      0 part1-mpath-360000970000292603946533030334542
mp_u00p1           253  74 L--w    0    1      0 part1-mpath-36000144000000010f0058d828093e63a
mpatham            253  67 L--w    0    1      1 mpath-36000144000000010f0058d828093e66e
VG_u00-lv_u00      253  53 L--w    1    1      0 LVM-niGBoMDkV3y8BFKMWfRMwbxT3h2gePlw30jEiPqpIJZGPXMskKSnY2oJdegLlzPm
mpdata09           253  30 L--w    1    1      1 mpath-360000970000292603946533030334533
mpdata11           253  26 L--w    1    1      1 mpath-360000970000292603946533030334442
mpapps             253   5 L--w    1    1      1 mpath-360000970000292603946533030343231
mpdata06p1         253  40 L--w  114    1      0 part1-mpath-360000970000292603946533030334537
mp_u01p1           253  75 L--w    0    1      0
mplogsp1           253  10 L--w    1    1      0 part1-mpath-360000970000292603946533030343236
mpu01              253   7 L--w    1    1      1 mpath-360000970000292603946533030343245
mpathal            253  69 L--w    0    1      1 mpath-36000144000000010f0058d828093e673
mpdata07p1         253  48 L--w  106    1      0 part1-mpath-360000970000292603946533030334646
mpdata08           253  15 L--w    1    1      1 mpath-360000970000292603946533030334337
mpdata10           253  29 L--w    1    1      3 mpath-360000970000292603946533030334446
mpdata08p1         253  21 L--w  107    1      0 part1-mpath-360000970000292603946533030334337
mpu00              253   8 L--w    1    1      1 mpath-360000970000292603946533030343244
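
With a listing this long it is easier to filter for the device in question, for example:

# dmsetup info -C | grep mp_u01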

# dmsetup remove mp_u01p1
# pvcreate /dev/mapper/mp_u01
  Writing physical volume data to disk "/dev/mapper/mp_u01"
  Physical volume "/dev/mapper/mp_u01" successfully created
#

Monday, February 4, 2019

Change the STACK parameter for Oracle DB


Change the STACK parameter for the Oracle database installation:

# ulimit -Ss
8192

vi /etc/security/limits.conf

oracle soft stack 10240   <---- add this line!

Verify the parameter:

# su - oracle
DBCOREP:/u00/app/oracle> ulimit -Ss
10240  <---- parameter OK!