文章目录
FreeIPA 用户规划
创建相关账号:
ipa user-add hadoopadmin --first=Hadoop --last=Admin
ipa group-add-member admins --users=hadoopadmin
创建密码:ipa passwd hadoopadmin admintianlingqun
登录测试:kinit hadoopadmin
ipa group-add ambari-managed-principals
ipa permission-add "Set User Password Expiration" --permissions=write --type=user --attrs=krbpasswordexpiration
ipa permission-add "Set Service Password Expiration" --permissions=write --type=service --attrs=krbpasswordexpiration
ipa privilege-add "Krbpass admin"
ipa privilege-add-permission "Krbpass admin" --permissions="Set User Password Expiration"
ipa privilege-add-permission "Krbpass admin" --permissions="Set Service Password Expiration"
ipa role-add-privilege "Security Architect" --privileges="Krbpass admin"
ipa role-add-member "Security Architect" --groups=admins
一. 团队用户相关
1 创建相关 团队用户
先 kinit admin
kinit admin
1.1 etl 用户:用于数据清洗相关工作
ipa_user=etl
ipa user-add ${ipa_user} --first=${ipa_user} --last=${ipa_user} --shell=/bin/bash --homedir=/home/${ipa_user}
1.2 bigdata 用户:“大数据” 团队用户,用于 数据清洗后的相关工作,比如 sqoop 数据同步,Spark or flink 实时任务 等
ipa_user=bigdata
ipa user-add ${ipa_user} --first=${ipa_user} --last=${ipa_user} --shell=/bin/bash --homedir=/home/${ipa_user}
ipa_user=tianlingqun
ipa user-add ${ipa_user} --first=${ipa_user} --last=${ipa_user} --shell=/bin/bash --homedir=/home/${ipa_user}
1.3 danalysis 用户:“数据分析” 团队用户,用于数据分析相关工作,比如:Hive 分层数据汇总等
ipa_user=danalysis
ipa user-add ${ipa_user} --first=${ipa_user} --last=${ipa_user} --shell=/bin/bash --homedir=/home/${ipa_user}
1.4 azkaban 用户:用于 Azkaban 调度系统
ipa_user=azkaban
ipa user-add ${ipa_user} --first=${ipa_user} --last=${ipa_user} --shell=/bin/bash --homedir=/home/${ipa_user}
1.5 hadoop 用户:hdfs 超级用户
ipa_user=hadoop
ipa user-add ${ipa_user} --first=${ipa_user} --last=${ipa_user} --shell=/bin/bash --homedir=/home/${ipa_user}
1.6 livy
ipa_user=livy
ipa user-add ${ipa_user} --first=${ipa_user} --last=${ipa_user} --shell=/bin/bash --homedir=/home/${ipa_user}
2. 创建相关团队用户的 home 目录 ( 在 onedts-dev-cdh-client-v01 服务器 操作)
2.1 etl
user_name=etl
user_home=/DATA/disk1/home/${user_name};
mkdir -p ${user_home}/security/keytabs/;
\cp /root/.bashrc ${user_home}/;
\cp /root/.bash_profile ${user_home}/;
chown -R ${user_name}:${user_name} ${user_home}/;
cd /home/;
ln -s ${user_home} ./${user_name};
2.2 bigdata
user_name=bigdata
user_home=/DATA/disk1/home/${user_name};
mkdir -p ${user_home}/security/keytabs/;
\cp /root/.bashrc ${user_home}/;
\cp /root/.bash_profile ${user_home}/;
chown -R ${user_name}:${user_name} ${user_home}/;
cd /home/;
ln -s ${user_home} ./${user_name};
2.3 danalysis
user_name=danalysis
user_home=/DATA/disk1/home/${user_name};
mkdir -p ${user_home}/security/keytabs/;
\cp /root/.bashrc ${user_home}/;
\cp /root/.bash_profile ${user_home}/;
chown -R ${user_name}:${user_name} ${user_home}/;
cd /home/;
ln -s ${user_home} ./${user_name};
2.4 azkaban
user_name=azkaban
user_home=/DATA/disk1/home/${user_name};
mkdir -p ${user_home}/security/keytabs/;
\cp /root/.bashrc ${user_home}/;
\cp /root/.bash_profile ${user_home}/;
chown -R ${user_name}:${user_name} ${user_home}/;
cd /home/;
ln -s ${user_home} ./${user_name};
2.5 hadoop
user_name=hadoop
user_home=/DATA/disk1/home/${user_name};
mkdir -p ${user_home}/security/keytabs/;
\cp /root/.bashrc ${user_home}/;
\cp /root/.bash_profile ${user_home}/;
chown -R ${user_name}:${user_name} ${user_home}/;
cd /home/;
ln -s ${user_home} ./${user_name};
3 创建相关团队用户的 kerberos 票据 ( 在 onedts-dev-cdh-client-v01 服务器 操作)
先 kinit admin
kinit admin
注意:一个用户的 有效票据文件永远是 “最后一次执行 ipa-getkeytab命令” 生成的文件,
所以,用户的票据文件要在多台服务器上使用的话,只能在某一台服务器生成 ${user_name}.keytab 后,再 scp 该文件到其他服务器
3.1 etl
user_name=etl
ipa_server=onedts-dev-cdh-ipa-v01.smartparkos.com
user_home=/DATA/disk1/home/${user_name};
ipa-getkeytab -s ${ipa_server} \
-p ${user_name} \
-k ${user_home}/security/keytabs/${user_name}.keytab
chown ${user_name}:${user_name} ${user_home}/security/keytabs/${user_name}.keytab;
chmod 400 ${user_home}/security/keytabs/${user_name}.keytab;
ls -l ${user_home}/security/keytabs/${user_name}.keytab;
3.2 bigdata
user_name=bigdata
ipa_server=onedts-dev-cdh-ipa-v01.smartparkos.com
user_home=/DATA/disk1/home/${user_name};
ipa-getkeytab -s ${ipa_server} \
-p ${user_name} \
-k ${user_home}/security/keytabs/${user_name}.keytab
chown ${user_name}:${user_name} ${user_home}/security/keytabs/${user_name}.keytab;
chmod 400 ${user_home}/security/keytabs/${user_name}.keytab;
ls -l ${user_home}/security/keytabs/${user_name}.keytab;
3.3 danalysis
user_name=danalysis
ipa_server=onedts-dev-cdh-ipa-v01.smartparkos.com
user_home=/DATA/disk1/home/${user_name};
ipa-getkeytab -s ${ipa_server} \
-p ${user_name} \
-k ${user_home}/security/keytabs/${user_name}.keytab
chown ${user_name}:${user_name} ${user_home}/security/keytabs/${user_name}.keytab;
chmod 400 ${user_home}/security/keytabs/${user_name}.keytab;
ls -l ${user_home}/security/keytabs/${user_name}.keytab;
3.4 azkaban ( 该用户不需要经常登陆执行脚本等,故该步骤可以忽略)
user_name=azkaban
ipa_server=onedts-dev-cdh-ipa-v01.smartparkos.com
user_home=/DATA/disk1/home/${user_name};
ipa-getkeytab -s ${ipa_server} \
-p ${user_name} \
-k ${user_home}/security/keytabs/${user_name}.keytab
chown ${user_name}:${user_name} ${user_home}/security/keytabs/${user_name}.keytab;
chmod 400 ${user_home}/security/keytabs/${user_name}.keytab;
ls -l ${user_home}/security/keytabs/${user_name}.keytab;
3.5 superset ( 用于 superset )
user_name=superset
ipa_server=onedts-dev-cdh-ipa-v01.smartparkos.com
user_home=/DATA/disk1/home/${user_name};
ipa-getkeytab -s ${ipa_server} \
-p ${user_name} \
-k ${user_home}/security/keytabs/${user_name}.keytab
chown ${user_name}:${user_name} ${user_home}/security/keytabs/${user_name}.keytab;
chmod 400 ${user_home}/security/keytabs/${user_name}.keytab;
ls -l ${user_home}/security/keytabs/${user_name}.keytab;
3.5 hadoop
user_name=hadoop
ipa_server=onedts-dev-cdh-ipa-v01.smartparkos.com
user_home=/DATA/disk1/home/${user_name};
ipa-getkeytab -s ${ipa_server} \
-p ${user_name} \
-k ${user_home}/security/keytabs/${user_name}.keytab
chown ${user_name}:${user_name} ${user_home}/security/keytabs/${user_name}.keytab;
chmod 400 ${user_home}/security/keytabs/${user_name}.keytab;
ls -l ${user_home}/security/keytabs/${user_name}.keytab;
注意:配置好 hadoop 用户后,在 hdfs 组件配置 dfs.permissions.superusergroup 参数,类似如下:
dfs.permissions.superusergroup=hadoop
配置好后,进 hadoop 用户,给其他 “团队用户” 创建 hdfs HOME 目录,具体操作类似如下:
su - hadoop;
user_name=etl;
hdfs dfs -mkdir /user/${user_name};
hdfs dfs -chown ${user_name}:${user_name} /user/${user_name}/;
hdfs dfs -ls /user/ | grep "${user_name}"
user_name=bigdata;
hdfs dfs -mkdir /user/${user_name};
hdfs dfs -chown ${user_name}:${user_name} /user/${user_name}/;
hdfs dfs -ls /user/ | grep "${user_name}"
user_name=danalysis;
hdfs dfs -mkdir /user/${user_name};
hdfs dfs -chown ${user_name}:${user_name} /user/${user_name}/;
hdfs dfs -ls /user/ | grep "${user_name}"
user_name=azkaban;
hdfs dfs -mkdir /user/${user_name};
hdfs dfs -chown ${user_name}:${user_name} /user/${user_name}/;
hdfs dfs -ls /user/ | grep "${user_name}"
user_name=hadoop;
hdfs dfs -mkdir /user/${user_name};
4 创建相关 crontab,以定期刷新 用户的 kerberos 票据
4.1 etl
user_name=etl
su - ${user_name}
crontab -e # 添加如下内容
# Refresh Kerberos 票据
0 0 * * * kinit -kt ${HOME}/security/keytabs/${USER}.keytab ${USER}
bigdata, danalysis, hadoop 等其他用户类似,不再赘述
二. 个人用户相关
1. yanghaoying
ipa_user=yanghaoying
user_first=haoying
user_last=yang
ipa user-add ${ipa_user} --first=${user_first} --last=${user_last} --shell=/bin/bash --homedir=/home/${ipa_user};
2. zhuyinghang
ipa_user=zhuyinghang
user_first=yinghang
user_last=zhu
ipa user-add ${ipa_user} --first=${user_first} --last=${user_last} --shell=/bin/bash --homedir=/home/${ipa_user};
3. xueming
ipa_user=xueming;
user_first=ming
user_last=xue
ipa user-add ${ipa_user} --first=${user_first} --last=${user_last} --shell=/bin/bash --homedir=/home/${ipa_user};
4. tianlingqun
ipa_user=tianlingqun;
user_first=youmou
user_last=luo
ipa user-add ${ipa_user} --first=${user_first} --last=${user_last} --shell=/bin/bash --homedir=/home/${ipa_user};
三. 用户组 相关
1 创建相关 用户组
1.1 g_etl: 用于数据清洗相关工作
group_name=g_etl
ipa group-add ${group_name} --desc groupOf${group_name}
ipa group-add-member ${group_name} \
--users=etl \
--users=tianlingqun
1.2 g_bigdata: “大数据” 团队,用于 数据清洗后的相关工作,比如 sqoop 数据同步,Spark or flink 实时任务 等
group_name=g_bigdata
ipa group-add ${group_name} --desc groupOf${group_name}
ipa group-add-member ${group_name} \
--users=bigdata \
--users=tianlingqun \
--users=zhuyinghang
1.3 g_danalysis: “数据分析” 团队,用于数据分析相关工作,比如:Hive 分层数据汇总等
group_name=g_danalysis
ipa group-add ${group_name} --desc groupOf${group_name}
ipa group-add-member ${group_name} \
--users=danalysis \
--users=yanghaoying \
--users=zhuyinghang \
--users=xueming
1.4 g_hue: 用于 hue 查询相关,所有需要利用 Hue 工具查询数据的用户,都需要添加到该组
group_name=g_hue
ipa group-add ${group_name} --desc groupOf${group_name}
ipa group-add-member ${group_name} \
--users=tianlingqun \
--users=yanghaoying \
--users=zhuyinghang \
--users=xueming
版权归原作者 与自己作战 所有, 如有侵权,请联系我们删除。