
Commit b97d6aa

Merge pull request openshift#1515 from richm/user-creation-with-new-installer
add back tests that require user creation
2 parents 562a4c4 + df5082e commit b97d6aa

File tree: 9 files changed (+173, -39 lines)

hack/testing/entrypoint.sh

Lines changed: 2 additions & 2 deletions
@@ -75,10 +75,8 @@ if oc get clusterlogging example > /dev/null 2>&1 ; then
     # cannot mount file inside pod into another pod - rewrite to use a configmap or secret
     # test-viaq-data-model
     expected_failures=(
-        check-logs test-access-control test-kibana-dashboards test-multi-tenancy
         test-out_rawtcp test-remote-syslog test-zzz-duplicate-entries
         test-read-throttling test-viaq-data-model test-zzzz-bulk-rejection
-
     )
 else
     expected_failures=(
@@ -206,6 +204,8 @@ function cleanup() {
 }
 trap "cleanup" EXIT
 
+rm -f ${OS_O_A_L_DIR}/temp/htpw.file
+
 if [[ -z "${TEST_ONLY:-}" ]]; then
     "${OS_O_A_L_DIR}/hack/testing/setup.sh"
 elif [[ -z "${KUBECONFIG:-}" ]]; then
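
Note: create_users (added in hack/testing/util.sh below) appends each test user to ${OS_O_A_L_DIR}/temp/htpw.file and only passes -c to htpasswd when that file does not yet exist, so removing any stale file up front lets each run start from an empty htpasswd database. A minimal sketch of that convention, assuming htpasswd from httpd-tools and a writable temp directory:

    # sketch: first call creates the htpasswd file (-c), later calls append to it
    htpwfile="${OS_O_A_L_DIR:-/tmp}/temp/htpw.file"
    mkdir -p "$(dirname "$htpwfile")"
    htpwargs="-b"                                   # -b: take the password from the command line
    [ -f "$htpwfile" ] || htpwargs="$htpwargs -c"   # -c: create the file on first use
    htpasswd $htpwargs "$htpwfile" testuser testpass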

hack/testing/util.sh

Lines changed: 111 additions & 0 deletions
@@ -83,9 +83,120 @@ function get_es_cert_path() {
     echo ${OS_O_A_L_DIR}/temp/es_certs
 }
 
+function create_users() {
+    # args are username password true|false (isadmin)
+    local addeduser=""
+    local username=""
+    local pwd=""
+    local isadmin=""
+    local existingusers=""
+    for item in "$@" ; do
+        if [ -z "$username" ] ; then username="$item" ; continue ; fi
+        if [ -z "$pwd" ] ; then pwd="$item" ; continue ; fi
+        if [ -z "$isadmin" ] ; then isadmin="$item" ; fi
+        if oc get user $username > /dev/null 2>&1 ; then
+            existingusers="$existingusers $username"
+        else
+            echo create_users: new user $username
+            oc get users
+            if [ ! -d ${OS_O_A_L_DIR}/temp ] ; then
+                mkdir -p ${OS_O_A_L_DIR}/temp
+            fi
+            local htpwargs="-b"
+            if [ ! -f ${OS_O_A_L_DIR}/temp/htpw.file ] ; then
+                htpwargs="$htpwargs -c"
+            fi
+            htpasswd $htpwargs ${OS_O_A_L_DIR}/temp/htpw.file "$username" "$pwd"
+            addeduser=true
+        fi
+        username=""
+        pwd=""
+        isadmin=""
+    done
+    if [ "${addeduser:-false}" = true ] ; then
+        # set management state to managed for auth operator
+        local mgmtstate=$( oc get authentication.operator cluster -o jsonpath='{.spec.managementState}' )
+        if [ "$mgmtstate" != Managed ] ; then
+            oc patch authentication.operator cluster --type=merge -p "{\"spec\":{\"managementState\": \"Managed\"}}"
+        fi
+        # kick the console pods because they cache oauth metadata (temporary, should not be required)
+        oc delete pods -n openshift-console --all --force --grace-period=0
+
+        # kick the monitoring pods because they cache oauth metadata (temporary, should not be required)
+        oc delete pods -n openshift-monitoring --all --force --grace-period=0
+
+        # see if there is already an htpass-secret - if so, delete it and recreate it
+        if oc -n openshift-config get secret htpass-secret > /dev/null 2>&1 ; then
+            oc -n openshift-config delete secret htpass-secret
+            os::cmd::try_until_failure "oc -n openshift-config get secret htpass-secret > /dev/null 2>&1"
+        fi
+        oc -n openshift-config create secret generic htpass-secret --from-file=htpasswd=${OS_O_A_L_DIR}/temp/htpw.file
+        os::cmd::try_until_success "oc -n openshift-config get secret htpass-secret > /dev/null 2>&1"
+        # configure HTPasswd IDP if not already configured
+        if ! oc get oauth -o jsonpath='{.items[*].spec.identityProviders[*].type}' | grep -q -i '^htpasswd$' ; then
+            oc apply -f - <<EOF
+apiVersion: config.openshift.io/v1
+kind: OAuth
+metadata:
+  name: cluster
+spec:
+  identityProviders:
+  - name: htpassidp
+    challenge: true
+    login: true
+    mappingMethod: claim
+    type: HTPasswd
+    htpasswd:
+      fileData:
+        name: htpass-secret
+EOF
+        fi
+        # fix ca cert in kubeconfig - should not be necessary much longer
+        if [ ! -d ${OS_O_A_L_DIR}/temp ] ; then
+            mkdir -p ${OS_O_A_L_DIR}/temp
+        fi
+        if [ ! -f ${OS_O_A_L_DIR}/temp/tls.crt ] ; then
+            oc -n openshift-ingress-operator extract secret/router-ca --to=${OS_O_A_L_DIR}/temp --keys=tls.crt
+            oc config view --minify -o go-template='{{index .clusters 0 "cluster" "certificate-authority-data" }}' --raw=true | base64 -d > ${OS_O_A_L_DIR}/temp/current-ca.crt
+            cat ${OS_O_A_L_DIR}/temp/tls.crt >> ${OS_O_A_L_DIR}/temp/current-ca.crt
+            for cluster in $( oc config get-clusters | grep -v NAME ) ; do
+                oc config set-cluster "$cluster" --certificate-authority=${OS_O_A_L_DIR}/temp/current-ca.crt --embed-certs=true
+            done
+        fi
+        # iterate over the users again, doing the oc login to ensure they exist
+        while [ -n "${1:-}" ] ; do
+            username="$1" ; shift
+            pwd="$1" ; shift
+            isadmin="$1"; shift
+            local existinguser=""
+            local skip=""
+            if [ -n "$existingusers" ] ; then
+                for existinguser in $existingusers ; do
+                    if [ "$existinguser" = "$username" ] ; then
+                        skip=true
+                    fi
+                done
+            fi
+            if [ "${skip:-false}" = true ] ; then
+                continue
+            fi
+            # wait until oc login succeeds
+            os::cmd::try_until_success "oc login -u '$username' -p '$pwd'" $((3 * minute)) 3
+            oc login -u system:admin
+            if [ $isadmin = true ] ; then
+                echo adding cluster-admin role to user "$username"
+                oc adm policy add-cluster-role-to-user cluster-admin "$username"
+            else
+                echo user "$username" is not an admin user
+            fi
+        done
+    fi
+}
+
 # set the test_token, test_name, and test_ip for token auth
 function get_test_user_token() {
     local current_project; current_project="$( oc project -q )"
+    create_users ${1:-${LOG_ADMIN_USER:-admin}} ${2:-${LOG_ADMIN_PW:-admin}} ${3:-true}
     oc login --username=${1:-${LOG_ADMIN_USER:-admin}} --password=${2:-${LOG_ADMIN_PW:-admin}} > /dev/null
     test_token="$(oc whoami -t)"
     test_name="$(oc whoami)"
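
Usage note: create_users consumes its arguments in triplets of username, password, and an isadmin flag, and get_test_user_token now provisions the requested user before logging in. A short sketch of the calling convention the tests below rely on (the user variables are the tests' existing defaults, not new names):

    # create two regular users and one cluster-admin in a single call
    create_users "$LOG_NORMAL_USER" "$LOG_NORMAL_PW" false \
                 "$LOG_USER2"       "$LOG_PW2"       false \
                 "$LOG_ADMIN_USER"  "$LOG_ADMIN_PW"  true

    # the third argument to get_test_user_token is passed through as the isadmin flag
    get_test_user_token "$LOG_ADMIN_USER" "$LOG_ADMIN_PW" true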

openshift/ci-operator/build-image/Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -3,4 +3,4 @@
 FROM openshift/origin-release:golang-1.10
 
 RUN yum -y install epel-release && \
-    yum -y install jq bc sudo
+    yum -y install jq bc sudo httpd-tools procps-ng

openshift/ci-operator/build-image/Dockerfile.full

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
 FROM openshift/origin-release:golang-1.10
 
 RUN yum -y install epel-release && \
-    yum -y install jq bc sudo
+    yum -y install jq bc sudo httpd-tools procps-ng coreutils
 
 RUN mkdir -p /go/src/github.com/openshift/origin-aggregated-logging/
 ADD Makefile /go/src/github.com/openshift/origin-aggregated-logging/

openshift/ci-operator/build-image/launch-e2e-test.sh

Lines changed: 9 additions & 0 deletions
@@ -16,6 +16,15 @@ fi
 if ! type -p sudo > /dev/null 2>&1 ; then
     pkgs="$pkgs sudo"
 fi
+if ! type -p htpasswd > /dev/null 2>&1 ; then
+    pkgs="$pkgs httpd-tools"
+fi
+if ! type -p watch > /dev/null 2>&1 ; then
+    pkgs="$pkgs procps-ng"
+fi
+if ! type -p base64 > /dev/null 2>&1 ; then
+    pkgs="$pkgs coreutils"
+fi
 if [ -n "$pkgs" ] ; then
     yum -y install $pkgs
 fi
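
These runtime checks mirror the packages added to the build images above: httpd-tools provides htpasswd (used by create_users), procps-ng provides watch, and coreutils provides base64. A small pre-flight sketch in the same spirit, assuming the suite is run outside the CI image:

    # report any missing tools before starting the e2e run
    for tool in jq bc sudo htpasswd watch base64 ; do
        type -p "$tool" > /dev/null 2>&1 || echo "missing: $tool"
    done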

test/access_control.sh

Lines changed: 39 additions & 23 deletions
@@ -9,13 +9,17 @@ os::util::environment::use_sudo
 os::test::junit::declare_suite_start "test/access_control"
 
 LOGGING_NS=${LOGGING_NS:-openshift-logging}
+
 espod=$( get_es_pod es )
 esopspod=$( get_es_pod es-ops )
 esopspod=${esopspod:-$espod}
 es_svc=$( get_es_svc es )
 es_ops_svc=$( get_es_svc es-ops )
 es_ops_svc=${es_ops_svc:-$es_svc}
 
+# enable debug logging for searchguard and o-e-plugin
+#curl_es $es_svc /_cluster/settings -XPUT -d '{"transient":{"logger.com.floragunn.searchguard":"TRACE","logger.io.fabric8.elasticsearch":"TRACE"}}'
+
 delete_users=""
 REUSE=${REUSE:-false}
 
@@ -28,6 +32,11 @@ function cleanup() {
         done
     fi
     if [ -n "${espod:-}" ] ; then
+        oc exec -c elasticsearch $espod -- es_acl get --doc=roles > $ARTIFACT_DIR/roles
+        oc exec -c elasticsearch $espod -- es_acl get --doc=rolesmapping > $ARTIFACT_DIR/rolesmapping
+        oc exec -c elasticsearch $espod -- es_acl get --doc=actiongroups > $ARTIFACT_DIR/actiongroups
+        oc logs -c elasticsearch $espod > $ARTIFACT_DIR/es.log
+        oc exec -c elasticsearch $espod -- logs >> $ARTIFACT_DIR/es.log
         curl_es_pod $espod /project.access-control-* -XDELETE > /dev/null
     fi
     for proj in access-control-1 access-control-2 access-control-3 ; do
@@ -49,10 +58,9 @@ function create_user_and_assign_to_projects() {
         os::log::info Using existing user $user
     else
         os::log::info Creating user $user with password $pw
-        oc login --username=$user --password=$pw 2>&1 | artifact_out
+        create_users "$user" "$pw" false 2>&1 | artifact_out
         delete_users="$delete_users $user"
     fi
-    oc login --username=system:admin 2>&1 | artifact_out
     os::log::info Assigning user to projects "$@"
     while [ -n "${1:-}" ] ; do
         oc project $1 2>&1 | artifact_out
@@ -107,7 +115,7 @@ function test_user_has_proper_access() {
     if [ "$espod" = "$esopspod" ] ; then
         esopshost=$eshost
     fi
-    get_test_user_token $user $pw
+    get_test_user_token $user $pw false
     for proj in "$@" ; do
         if [ "$proj" = "--" ] ; then
             expected=0
@@ -136,12 +144,28 @@
         # make sure no access with incorrect auth
         # bogus token
         os::log::info Checking access providing bogus token
-        os::cmd::expect_success_and_text "curl_es_pod_with_token $espod '/project.$proj.*/_count' BOGUS -w '%{response_code}\n'" '401$'
-        os::cmd::expect_success_and_text "curl_es_from_kibana $kpod $eshost '/project.$proj.*/_count' BOGUS -w '%{response_code}\n'" '.*403$'
+        if ! os::cmd::expect_success_and_text "curl_es_pod_with_token $espod '/project.$proj.*/_count' BOGUS -w '%{response_code}\n'" '401$'; then
+            os::log::error invalid access from es with BOGUS token
+            curl_es_pod_with_token $espod "/project.$proj.*/_count" BOGUS -v || :
+            exit 1
+        fi
+        if ! os::cmd::expect_success_and_text "curl_es_from_kibana $kpod $eshost '/project.$proj.*/_count' BOGUS -w '%{response_code}\n'" '.*403$'; then
+            os::log::error invalid access from kibana with BOGUS token
+            curl_es_from_kibana $kpod $eshost "/project.$proj.*/_count" BOGUS -v || :
+            exit 1
+        fi
         # no token
         os::log::info Checking access providing no username or token
-        os::cmd::expect_success_and_text "curl_es_pod_with_token $espod '/project.$proj.*/_count' '' -w '%{response_code}\n'" '401$'
-        os::cmd::expect_success_and_text "curl_es_from_kibana $kpod $eshost '/project.$proj.*/_count' '' -w '%{response_code}\n' -o /dev/null" '403$'
+        if ! os::cmd::expect_success_and_text "curl_es_pod_with_token $espod '/project.$proj.*/_count' '' -w '%{response_code}\n'" '401$'; then
+            os::log::error invalid access from es with empty token
+            curl_es_pod_with_token $espod "/project.$proj.*/_count" "" -v || :
+            exit 1
+        fi
+        if ! os::cmd::expect_success_and_text "curl_es_from_kibana $kpod $eshost '/project.$proj.*/_count' '' -w '%{response_code}\n' -o /dev/null" '403$'; then
+            os::log::error invalid access from kibana with empty token
+            curl_es_from_kibana $kpod $eshost "/project.$proj.*/_count" "" -v || :
+            exit 1
+        fi
     fi
 done
 
@@ -186,21 +210,6 @@
 LOG_ADMIN_USER=${LOG_ADMIN_USER:-admin}
 LOG_ADMIN_PW=${LOG_ADMIN_PW:-admin}
 
-if oc get users "$LOG_ADMIN_USER" > /dev/null 2>&1 ; then
-    echo Using existing admin user $LOG_ADMIN_USER 2>&1 | artifact_out
-else
-    os::log::info Creating cluster-admin user $LOG_ADMIN_USER
-    current_project="$( oc project -q )"
-    oc login --username=$LOG_ADMIN_USER --password=$LOG_ADMIN_PW 2>&1 | artifact_out
-    oc login --username=system:admin 2>&1 | artifact_out
-    oc project $current_project 2>&1 | artifact_out
-fi
-oc adm policy add-cluster-role-to-user cluster-admin $LOG_ADMIN_USER 2>&1 | artifact_out
-os::log::info workaround access_control admin failures - sleep 60 seconds to allow system to process cluster role setting
-sleep 60
-oc policy can-i '*' '*' --user=$LOG_ADMIN_USER 2>&1 | artifact_out
-oc get users 2>&1 | artifact_out
-
 # if you ever want to run this test again on the same machine, you'll need to
 # use different usernames, otherwise you'll get this odd error:
 # # oc login --username=loguser --password=loguser
@@ -212,6 +221,13 @@ LOG_NORMAL_PW=${LOG_NORMAL_PW:-loguserac-$RANDOM}
 LOG_USER2=${LOG_USER2:-loguser2ac-$RANDOM}
 LOG_PW2=${LOG_PW2:-loguser2ac-$RANDOM}
 
+create_users $LOG_NORMAL_USER $LOG_NORMAL_PW false $LOG_USER2 $LOG_PW2 false $LOG_ADMIN_USER $LOG_ADMIN_PW true 2>&1 | artifact_out
+
+os::log::info workaround access_control admin failures - sleep 60 seconds to allow system to process cluster role setting
+sleep 60
+oc auth can-i '*' '*' --user=$LOG_ADMIN_USER 2>&1 | artifact_out
+oc get users 2>&1 | artifact_out
+
 create_user_and_assign_to_projects $LOG_NORMAL_USER $LOG_NORMAL_PW access-control-1 access-control-2
 create_user_and_assign_to_projects $LOG_USER2 $LOG_PW2 access-control-2 access-control-3
 
@@ -227,7 +243,7 @@ if [ ${LOGGING_NS} = "logging" ] ; then
 fi
 
 os::log::info now auth using admin + token
-get_test_user_token $LOG_ADMIN_USER $LOG_ADMIN_PW
+get_test_user_token $LOG_ADMIN_USER $LOG_ADMIN_PW true
 if [ ${LOGGING_NS} = "logging" ] && [ $espod != $esopspod] ; then
     nrecs=$( curl_es_pod_with_token $espod "/${logging_index}/_count" $test_token | get_count_from_json )
     os::cmd::expect_success "test $nrecs -gt 1"
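
The 60-second sleep is kept as a workaround for the delay before the cluster-admin grant becomes visible. A polling variant in the same style (a sketch using the existing os::cmd helpers, not what the commit does) could look like:

    # poll until the admin user can do everything, for up to 2 minutes
    os::cmd::try_until_text "oc auth can-i '*' '*' --user=$LOG_ADMIN_USER" "yes" $((2 * minute))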

test/cluster/functionality.sh

Lines changed: 2 additions & 1 deletion
@@ -43,6 +43,7 @@ os::test::junit::declare_suite_start "test/cluster/functionality"
 # We need to use a name and token for logging checks later,
 # so we have to provision a user with a token for this.
 # TODO: Why is this necessary?
+create_users ${LOG_ADMIN_USER:-admin} ${LOG_ADMIN_PW:-admin} true
 os::cmd::expect_success "oc login --username=${LOG_ADMIN_USER:-admin} --password=${LOG_ADMIN_PW:-admin}"
 test_user="$( oc whoami )"
 test_token="$( oc whoami -t )"
@@ -118,7 +119,7 @@ for elasticsearch_pod in $( get_es_pod ${OAL_ELASTICSEARCH_COMPONENT} ); do
     for kibana_pod in $( oc get pods --selector component="${OAL_KIBANA_COMPONENT}" -o jsonpath='{ .items[*].metadata.name }' ); do
         os::log::info "Checking for index ${index} with Kibana pod ${kibana_pod}..."
         # As we're checking system log files, we need to use `sudo`
-        os::cmd::expect_success "sudo -E VERBOSE=true go run '${OS_O_A_L_DIR}/hack/testing/check-logs.go' '${kibana_pod}' '${elasticsearch_api}' '${index}' '${index_search_path}' '${query_size}' '${test_user}' '${test_token}' '${test_ip}'"
+        os::cmd::expect_success "sudo -E env PATH=$PATH VERBOSE=true go run '${OS_O_A_L_DIR}/hack/testing/check-logs.go' '${kibana_pod}' '${elasticsearch_api}' '${index}' '${index_search_path}' '${query_size}' '${test_user}' '${test_token}' '${test_ip}'"
     done
 done

test/kibana_dashboards.sh

Lines changed: 2 additions & 6 deletions
@@ -30,12 +30,8 @@ if oc get users "$LOG_ADMIN_USER" > /dev/null 2>&1 ; then
    os::log::debug Using existing user $LOG_ADMIN_USER
 else
    os::log::info Creating cluster-admin user $LOG_ADMIN_USER
-    current_project="$( oc project -q )"
-    os::log::debug "$( oc login --username=$LOG_ADMIN_USER --password=$LOG_ADMIN_PW )"
-    os::log::debug "$( oc login --username=system:admin )"
-    os::log::debug "$( oc project $current_project )"
+    create_users $LOG_ADMIN_USER $LOG_ADMIN_PW true 2>&1 | artifact_out
 fi
-os::log::debug "$( oc adm policy add-cluster-role-to-user cluster-admin $LOG_ADMIN_USER )"
 
 function cleanup() {
     set +e
@@ -53,7 +49,7 @@ done
 
 # use admin user created in logging framework
 # make sure admin kibana index exists - log in to ES as admin user
-get_test_user_token $LOG_ADMIN_USER $LOG_ADMIN_PW
+get_test_user_token $LOG_ADMIN_USER $LOG_ADMIN_PW true
 for pod in $espod $esopspod ; do
     curl_es_pod_with_token $pod "/" "$test_token" | curl_output
     # add the ui objects

test/multi_tenancy.sh

Lines changed: 6 additions & 5 deletions
@@ -60,7 +60,7 @@ function create_user_and_assign_to_projects() {
         os::log::info Using existing user $user
     else
         os::log::info Creating user $user with password $pw
-        oc login --username=$user --password=$pw 2>&1 | artifact_out
+        create_users $user $pw false 2>&1 | artifact_out
         delete_users="$delete_users $user"
     fi
     os::log::debug "$( oc login --username=system:admin 2>&1 )"
@@ -90,7 +90,7 @@ function test_user_has_proper_access() {
     # rest - indices to which access should be granted
     for proj in "$@" ; do
         os::log::info See if user $user can read /project.$proj.*
-        get_test_user_token $user $pw
+        get_test_user_token $user $pw false
         nrecs=$( curl_es_pod_with_token $espod "/project.$proj.*/_count" $test_token | \
             get_count_from_json )
         if ! os::cmd::expect_success "test $nrecs = 1" ; then
@@ -108,7 +108,7 @@
 
     # test user has access for msearch for multiple indices
     os::log::info See if user $user can _msearch "$indices"
-    get_test_user_token $user $pw
+    get_test_user_token $user $pw false
     nrecs=$( { echo '{"index":'"${indices}"'}'; echo '{"size":0,"query":{"match_all":{}}}'; } | \
         curl_es_pod_with_token_and_input $espod "/_msearch" $test_token -XPOST --data-binary @- | \
         get_count_from_json_from_search )
@@ -124,7 +124,7 @@
 
     # verify normal user has no access to default indices
     os::log::info See if user $user is denied /project.default.*
-    get_test_user_token $user $pw
+    get_test_user_token $user $pw false
     nrecs=$( curl_es_pod_with_token $espod "/project.default.*/_count" $test_token | \
         get_count_from_json )
     if ! os::cmd::expect_success "test $nrecs = 0" ; then
@@ -135,7 +135,7 @@
 
     # verify normal user has no access to .operations
    os::log::info See if user $user is denied /.operations.*
-    get_test_user_token $user $pw
+    get_test_user_token $user $pw false
     nrecs=$( curl_es_pod_with_token $esopspod "/.operations.*/_count" $test_token | \
         get_count_from_json )
     if ! os::cmd::expect_success "test $nrecs = 0" ; then
@@ -165,6 +165,7 @@ LOG_NORMAL_PW=${LOG_NORMAL_PW:-loguser1-$RANDOM}
 LOG_USER2=${LOG_USER2:-loguser2-$RANDOM}
 LOG_PW2=${LOG_PW2:-loguser2-$RANDOM}
 
+create_users $LOG_NORMAL_USER $LOG_NORMAL_PW false $LOG_USER2 $LOG_PW2 false 2>&1 | artifact_out
 create_user_and_assign_to_projects $LOG_NORMAL_USER $LOG_NORMAL_PW multi-tenancy-1 multi-tenancy-2
 create_user_and_assign_to_projects $LOG_USER2 $LOG_PW2 multi-tenancy-2 multi-tenancy-3
