https://blog.51cto.com/michaelkang/2167195
本文对敏感信息进行了替换!!!!
# Download and install the Greenplum 5.10.2 RPM, then prepare all hosts.
cd /workspace/gpdb/
# Download the installer (download hostname redacted by the author).
wget dl.#kjh#.com/greenplum-db-5.10.2-rhel7-x86_64.rpm
默认安装到 /usr/local/greenplum-db/ (以root在master执行)
# Installs to /usr/local/greenplum-db/ by default; run as root on the master.
rpm -Uvh greenplum-db-5.10.2-rhel7-x86_64.rpm
# Load the Greenplum environment (GPHOME, PATH, library paths) into this shell.
source /usr/local/greenplum-db/greenplum_path.sh
# Exchange SSH keys among all hosts in gp-all.txt (passwordless ssh between nodes).
gpssh-exkeys -f /workspace/gpdb/gp-all.txt
# Copy the Greenplum binaries to every host and create the gpadmin user there.
gpseginstall -f /workspace/gpdb/gp-all.txt -u gpadmin -p gpadmin
source /usr/local/greenplum-db/greenplum_path.sh
# Validate OS settings on the master (-m) and segment (-s) hosts.
# NOTE(review): documented gpcheck syntax takes a single host after -m and -s;
# confirm this multi-host form actually works on this version.
gpcheck -f /workspace/gpdb/gp-all.txt -m gpnode615.kjh.com gpnode616.kjh.com \
-s gpnode611.kjh.com gpnode612.kjh.com gpnode613.kjh.com gpnode614.kjh.com
#gpssh工具可以同时在多个机器上执行相同命令,和ansible功能类似
/usr/local/gpdata :master 数据目录
/data/gpdata :segment 数据目录
/data01/gpdata :segment 数据目录
# Open an interactive gpssh session that runs each command on every host
# listed in gp-all.txt (similar in spirit to ansible ad-hoc commands).
gpssh -f /workspace/gpdb/gp-all.txt
# At the gpssh "=>" prompt, run the following on all hosts:
# Create the gpadmin group/user with fixed IDs so UIDs match cluster-wide.
groupadd -g 1888 gpadmin
useradd -g 1888 -u 1888 -m -d /home/gpadmin -s /bin/bash gpadmin
# Use the standard OWNER:GROUP separator (the "." form is deprecated).
chown -R gpadmin:gpadmin /home/gpadmin
echo "gpadmin" | passwd --stdin gpadmin
# Data directories: master on /usr/local, primaries and mirrors on the two
# data disks. -p everywhere so the commands are idempotent on reruns.
mkdir -p /usr/local/gpdata/gpmaster
mkdir -p /data/gpdata/primary
mkdir -p /data01/gpdata/primary
mkdir -p /data/gpdata/mirror
mkdir -p /data01/gpdata/mirror
chown -R gpadmin:gpadmin /usr/local/gpdata
chown -R gpadmin:gpadmin /data/gpdata
chown -R gpadmin:gpadmin /data01/gpdata
=> df -h|grep data
[gpnode611.kjh.com] /dev/sdb 894G 33M 894G 1% /data01
[gpnode611.kjh.com] /dev/sda 894G 33M 894G 1% /data
[gpnode612.kjh.com] /dev/sda 932G 33M 932G 1% /data01
[gpnode612.kjh.com] /dev/sdb 894G 33M 894G 1% /data
[gpnode614.kjh.com] /dev/sda 932G 33M 932G 1% /data01
[gpnode614.kjh.com] /dev/sdb 932G 33M 932G 1% /data
[gpnode613.kjh.com] /dev/sdb 932G 33M 932G 1% /data
[gpnode613.kjh.com] /dev/sda 932G 33M 932G 1% /data01
# Switch to the gpadmin user; the commands below run in that user's shell.
su - gpadmin
# Append the Greenplum environment to ~/.bashrc. The delimiter is quoted
# ('EOF') so the here-document is written literally — nothing inside it can
# be expanded by the shell doing the append.
cat >>/home/gpadmin/.bashrc<<-'EOF'
source /usr/local/greenplum-db/greenplum_path.sh
export MASTER_DATA_DIRECTORY=/usr/local/gpdata/gpmaster/gpseg-1
export PGPORT=5432
EOF
su - gpadmin
# Same settings in ~/.bash_profile so login shells pick them up as well.
cat >>/home/gpadmin/.bash_profile<<-'EOF'
source /usr/local/greenplum-db/greenplum_path.sh
export MASTER_DATA_DIRECTORY=/usr/local/gpdata/gpmaster/gpseg-1
export PGPORT=5432
EOF
# Load the new environment into the current shell.
source ~/.bashrc
source ~/.bash_profile
# Write the segment host list that gpinitsystem consumes via -h.
cat >/usr/local/greenplum-db/all_segment<<-EOF
gpnode611.kjh.com
gpnode612.kjh.com
gpnode613.kjh.com
gpnode614.kjh.com
EOF
# Verify the file; the four lines below are the command's output.
more /usr/local/greenplum-db/all_segment
gpnode611.kjh.com
gpnode612.kjh.com
gpnode613.kjh.com
gpnode614.kjh.com
# Start from the sample configuration shipped with Greenplum, then edit it.
cp $GPHOME/docs/cli_help/gpconfigs/gpinitsystem_config /usr/local/greenplum-db/gpinit_config
vi /usr/local/greenplum-db/gpinit_config
# --- contents of gpinit_config after editing ---
ARRAY_NAME="PT Greenplum Data Platform"
# Prefix for segment data directories (gpseg0, gpseg1, ...).
SEG_PREFIX=gpseg
# Base TCP port for primary segment instances.
PORT_BASE=40000
# Four primary directories per host => four primary segments per host.
declare -a DATA_DIRECTORY=(/data/gpdata/primary /data/gpdata/primary /data01/gpdata/primary /data01/gpdata/primary )
MASTER_HOSTNAME=gpnode615.kjh.com
MASTER_DIRECTORY=/usr/local/gpdata/gpmaster
MASTER_PORT=5432
# Base ports for mirror segments and the 5.x file-replication channels.
MIRROR_PORT_BASE=50000
REPLICATION_PORT_BASE=41000
MIRROR_REPLICATION_PORT_BASE=51000
TRUSTED_SHELL=ssh
CHECK_POINT_SEGMENTS=8
ENCODING=UNICODE
# Database created automatically at the end of initialization.
DATABASE_NAME=testgpdb
MACHINE_LIST_FILE=/usr/local/greenplum-db/all_segment
# Mirror directories match the primary layout (four per host).
declare -a MIRROR_DATA_DIRECTORY=(/data/gpdata/mirror /data/gpdata/mirror /data01/gpdata/mirror /data01/gpdata/mirror)
# Generic form: gpinitsystem -c <init config file> -h <segment host file>
gpinitsystem -c gpinit_config -h hostfile_segments
gpinitsystem命令参数解释:
-c:指定初始化文件。
-h:指定segment主机文件。
-s:指定standby主机,创建standby节点。
# Parameter meanings (translation of the notes above):
#   -c  the initialization configuration file
#   -h  the segment host file
#   -s  the standby master host (creates the standby node)
su - gpadmin
# Actual run: 4 segment hosts, mirrors enabled, gpnode616 as standby master.
gpinitsystem -c /usr/local/greenplum-db/gpinit_config -h /usr/local/greenplum-db/all_segment -s gpnode616.kjh.com
=>[INFO]:-Greenplum Database Creation Parameters
=>[INFO]:---------------------------------------
=>[INFO]:-Master Configuration
=>[INFO]:---------------------------------------
=>[INFO]:-Master instance name = PT Greenplum Data Platform
=>[INFO]:-Master hostname = gpnode615.kjh.com
=>[INFO]:-Master port = 5432
=>[INFO]:-Master instance dir = /usr/local/gpdata/gpmaster/gpseg-1
=>[INFO]:-Master LOCALE = en_US.utf8
................
=>[INFO]:-Master IP address [4] = 172.20.6.15
=>[INFO]:-Master IP address [5] = 172.20.6.221
................
=>[INFO]:-Standby Master = gpnode616.kjh.com
=>[INFO]:-Primary segment # = 4
=>[INFO]:-Standby IP address = ::1
=>[INFO]:-Standby IP address = 172.20.6.16
=>[INFO]:-Standby IP address = 172.20.6.220
....................
=>[INFO]:----------------------------------------
=>[INFO]:-Greenplum Primary Segment Configuration
=>[INFO]:----------------------------------------
=>[INFO]:-gpnode611.kjh.com /data/gpdata/primary/gpseg0 40000 2 0 41000
=>[INFO]:-gpnode611.kjh.com /data/gpdata/primary/gpseg1 40001 3 1 41001
=>[INFO]:-gpnode611.kjh.com /data01/gpdata/primary/gpseg2 40002 4 2 41002
.........................
=>[INFO]:-gpnode614.kjh.com /data01/gpdata/primary/gpseg14 40002 16 14 41002
=>[INFO]:-gpnode614.kjh.com /data01/gpdata/primary/gpseg15 40003 17 15 41003
=>[INFO]:---------------------------------------
=>[INFO]:-Greenplum Mirror Segment Configuration
=>[INFO]:---------------------------------------
=>[INFO]:-gpnode612.kjh.com /data/gpdata/mirror/gpseg0 50000 18 0 51000
=>[INFO]:-gpnode612.kjh.com /data/gpdata/mirror/gpseg1 50001 19 1 51001
.........................
=>[INFO]:-gpnode611.kjh.com /data01/gpdata/mirror/gpseg14 50002 32 14 51002
=>[INFO]:-gpnode611.kjh.com /data01/gpdata/mirror/gpseg15 50003 33 15 51003
Continue with Greenplum creation Yy|Nn (default=N):
Y
=>
=>[INFO]:------------------------------------------------
=>[INFO]:-Parallel process exit status
=>[INFO]:------------------------------------------------
=>[INFO]:-Total processes marked as completed = 16
=>[INFO]:-Total processes marked as killed = 0
=>[INFO]:-Total processes marked as failed = 0
=>[INFO]:------------------------------------------------
=>[INFO]:-Commencing parallel build of mirror segment instances
............................
=>
=>[INFO]:------------------------------------------------
=>[INFO]:-Parallel process exit status
=>[INFO]:------------------------------------------------
=>[INFO]:-Total processes marked as completed = 16
=>[INFO]:-Total processes marked as killed = 0
=>[INFO]:-Total processes marked as failed = 0
=>[INFO]:------------------------------------------------
.....................略
=>
=>]:-Process results...
=>]:- Successful segment starts = 32
=>]:- Failed segment starts = 0
=>]:- Skipped segment starts (segments are marked down in configuration) = 0
=>]:-----------------------------------------------------
=>]:-Successfully started 32 of 32 segment instances
=>]:-----------------------------------------------------
.
=>]:------------------------------------------------------
=>]:-Greenplum standby master initialization parameters
=>]:------------------------------------------------------
=>]:-Greenplum master hostname = gpnode615.kjh.com
=>]:-Greenplum master data directory = /usr/local/gpdata/gpmaster/gpseg-1
=>]:-Greenplum master port = 5432
=>]:-Greenplum standby master hostname = gpnode616.kjh.com
=>]:-Greenplum standby master port = 5432
=>]:-Greenplum standby master data directory = /usr/local/gpdata/gpmaster/gpseg-1
=>]:-Greenplum update system catalog = On
=>:-Greenplum Database instance successfully created
=>]:-------------------------------------------------------
=>]:-To complete the environment configuration, please
=>]:-update gpadmin .bashrc file with the following
=>]:-1. Ensure that the greenplum_path.sh file is sourced
=>]:-2. Add "export MASTER_DATA_DIRECTORY=/usr/local/gpdata/gpmaster/gpseg-1"
=>]:- to access the Greenplum scripts for this instance:
=>]:- or, use -d /usr/local/gpdata/gpmaster/gpseg-1 option for the Greenplum scripts
=>]:- Example gpstate -d /usr/local/gpdata/gpmaster/gpseg-1
=>]:-Script log file = /home/gpadmin/gpAdminLogs/gpinitsystem_20180903.log
=>]:-To remove instance, run gpdeletesystem utility
=>]:-Standby Master gpnode616.kjh.com has been configured
=>]:-To activate the Standby Master Segment in the event of Master
=>]:-failure review options for gpactivatestandby
=>]:-------------------------------------------------------
=>]:-The Master /usr/local/gpdata/gpmaster/gpseg-1/pg_hba.conf post gpinitsystem
=>]:-has been configured to allow all hosts within this new
=>]:-array to intercommunicate. Any hosts external to this
=>]:-new array must be explicitly added to this file
=>]:-Refer to the Greenplum Admin support guide which is
=>]:-located in the /usr/local/greenplum-db/./docs directory
=>]:-------------------------------------------------------
# Connect locally to the new database and set a password for gpadmin.
psql -d testgpdb
gpdb=# alter user gpadmin with password 'gpadmin';
gpdb=# \q
#### Remote login
psql -p 5432 -d testgpdb
# Check the server version
select * from version();
vi ${MASTER_DATA_DIRECTORY}/postgresql.conf
# Set the listen address ('*' — use with caution in production)
listen_addresses = '${ host ip address } '
port = 5432
#### Configure the database access whitelist
vi ${MASTER_DATA_DIRECTORY}/pg_hba.conf
# Add a record like the one below: md5 password auth for gpadmin,
# allowed from any client IP address.
#TYPE DATABASE USER CIDR-ADDRESS METHOD
host all gpadmin 0.0.0.0/0 md5
# Test the remote login
psql -p 5432 -h 172.20.6.15 -U gpadmin -d testgpdb
可以使用这篇文章的命令查看集群状态:
https://blog.51cto.com/michaelkang/2169857
https://gpdb.docs.pivotal.io/43170/install_guide/init_gpdb.html
https://gp-docs-cn.github.io/docs/utility_guide/admin_utilities/gprecoverseg.html
免责声明:本站发布的内容(图片、视频和文字)以原创、转载和分享为主,文章观点不代表本网站立场,如果涉及侵权请联系站长邮箱:is@yisu.com进行举报,并提供相关证据,一经查实,将立刻删除涉嫌侵权内容。