I have an OpenShift cluster with a master and a node. Today it no longer works: OpenShift continuously asks me for credentials. Here is the command I run and its output:
[root@openshift-master ~]# openshift start master --config=openshift.local.config/master/master-config.yaml
W0615 07:26:00.645598 2554 start_master.go:270] assetConfig.loggingPublicURL: Invalid value: "": required to view aggregated container logs in the console
W0615 07:26:00.645661 2554 start_master.go:270] assetConfig.metricsPublicURL: Invalid value: "": required to view cluster metrics in the console
I0615 07:26:01.123879 2554 plugins.go:71] No cloud provider specified.
I0615 07:26:01.220499 2554 genericapiserver.go:81] Adding storage destination for group
I0615 07:26:01.220532 2554 genericapiserver.go:81] Adding storage destination for group extensions
I0615 07:26:01.220552 2554 start_master.go:383] Starting master on 0.0.0.0:8443 (v1.2.0-rc1-dirty)
I0615 07:26:01.220556 2554 start_master.go:384] Public master address is https://192.168.1.181:8443
I0615 07:26:01.220568 2554 start_master.go:388] Using images from "openshift/origin-<component>:v1.2.0-rc1"
I0615 07:26:01.690075 2554 run.go:61] Started etcd at 192.168.1.181:4001
I0615 07:26:01.798909 2554 run_components.go:204] Using default project node label selector:
W0615 07:26:02.300146 2554 controller.go:297] Resetting endpoints for master service "kubernetes" to &{{ } {kubernetes default a7bc6c6c-23d3-11e6-a479-06337000002a 8 0 2016-05-27 08:24:24 +0200 CEST <nil> <nil> map[] map[]} [{[{192.168.1.181 <nil>}] [] [{https 8443 TCP} {dns 53 UDP} {dns-tcp 53 TCP}]}]}
I0615 07:26:02.455421 2554 master.go:262] Started Kubernetes API at 0.0.0.0:8443/api/v1
I0615 07:26:02.455465 2554 master.go:262] Started Kubernetes API Extensions at 0.0.0.0:8443/apis/extensions/v1beta1
I0615 07:26:02.455472 2554 master.go:262] Started Origin API at 0.0.0.0:8443/oapi/v1
I0615 07:26:02.455477 2554 master.go:262] Started OAuth2 API at 0.0.0.0:8443/oauth
I0615 07:26:02.455482 2554 master.go:262] Started Web Console 0.0.0.0:8443/console/
I0615 07:26:02.455487 2554 master.go:262] Started Swagger Schema API at 0.0.0.0:8443/swaggerapi/
I0615 07:26:04.512392 2554 run_components.go:199] DNS listening at 0.0.0.0:53
I0615 07:26:04.512442 2554 start_master.go:527] Controllers starting (*)
I0615 07:26:04.749125 2554 nodecontroller.go:143] Sending events to api server.
I0615 07:26:04.751339 2554 replication_controller.go:208] Starting RC Manager
I0615 07:26:04.752209 2554 horizontal.go:120] Starting HPA Controller
I0615 07:26:04.752554 2554 controller.go:211] Starting Daemon Sets controller manager
W0615 07:26:05.100389 2554 nodecontroller.go:671] Missing timestamp for Node openshift-node1. Assuming now as a timestamp.
W0615 07:26:05.100409 2554 nodecontroller.go:671] Missing timestamp for Node openshift-node2. Assuming now as a timestamp.
I0615 07:26:05.101485 2554 event.go:211] Event(api.ObjectReference{Kind:"Node", Namespace:"", Name:"openshift-node1", UID:"openshift-node1", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node openshift-node1 event: Registered Node openshift-node1 in NodeController
I0615 07:26:05.101522 2554 event.go:211] Event(api.ObjectReference{Kind:"Node", Namespace:"", Name:"openshift-node2", UID:"openshift-node2", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node openshift-node2 event: Registered Node openshift-node2 in NodeController
F0615 07:26:05.158602 2554 master.go:93] Failed to get supported resources from server: the server has asked for the client to provide credentials
I0615 07:26:05.221398 2554 endpoints_controller.go:283] Waiting for pods controller to sync, requeuing rc default/php-apache
I0615 07:26:05.221502 2554 endpoints_controller.go:283] Waiting for pods controller to sync, requeuing rc default/redis-slave
The YAML configuration file is this:
admissionConfig:
  pluginConfig: null
apiLevels:
- v1
apiVersion: v1
assetConfig:
  extensionDevelopment: false
  extensionScripts: null
  extensionStylesheets: null
  extensions: null
  loggingPublicURL: ""
  logoutURL: ""
  masterPublicURL: https://192.168.1.181:8443
  metricsPublicURL: ""
  publicURL: https://192.168.1.181:8443/console/
  servingInfo:
    bindAddress: 0.0.0.0:8443
    bindNetwork: tcp4
    certFile: master.server.crt
    clientCA: ""
    keyFile: master.server.key
    maxRequestsInFlight: 0
    namedCertificates: null
    requestTimeoutSeconds: 0
controllerLeaseTTL: 0
controllers: '*'
corsAllowedOrigins:
- 127.0.0.1
- 192.168.1.181:8443
- localhost
disabledFeatures: null
dnsConfig:
  allowRecursiveQueries: false
  bindAddress: 0.0.0.0:53
  bindNetwork: tcp4
etcdClientInfo:
  ca: ca.crt
  certFile: master.etcd-client.crt
  keyFile: master.etcd-client.key
  urls:
  - https://192.168.1.181:4001
etcdConfig:
  address: 192.168.1.181:4001
  peerAddress: 192.168.1.181:7001
  peerServingInfo:
    bindAddress: 0.0.0.0:7001
    bindNetwork: tcp4
    certFile: etcd.server.crt
    clientCA: ca.crt
    keyFile: etcd.server.key
    namedCertificates: null
  servingInfo:
    bindAddress: 0.0.0.0:4001
    bindNetwork: tcp4
    certFile: etcd.server.crt
    clientCA: ca.crt
    keyFile: etcd.server.key
    namedCertificates: null
  storageDirectory: /root/openshift.local.etcd
etcdStorageConfig:
  kubernetesStoragePrefix: kubernetes.io
  kubernetesStorageVersion: v1
  openShiftStoragePrefix: openshift.io
  openShiftStorageVersion: v1
imageConfig:
  format: openshift/origin-${component}:${version}
  latest: false
imagePolicyConfig:
  disableScheduledImport: false
  maxImagesBulkImportedPerRepository: 5
  maxScheduledImageImportsPerMinute: 60
  scheduledImageImportMinimumIntervalSeconds: 900
kind: MasterConfig
kubeletClientInfo:
  ca: ca.crt
  certFile: master.kubelet-client.crt
  keyFile: master.kubelet-client.key
  port: 10250
kubernetesMasterConfig:
  admissionConfig:
    pluginConfig: null
  apiLevels: null
  apiServerArguments: null
  controllerArguments: null
  disabledAPIGroupVersions: {}
  masterCount: 1
  masterIP: 192.168.1.181
  podEvictionTimeout: 5m
  proxyClientInfo:
    certFile: master.proxy-client.crt
    keyFile: master.proxy-client.key
  schedulerConfigFile: ""
  servicesNodePortRange: 30000-32767
  servicesSubnet: 172.30.0.0/16
  staticNodeNames: null
masterClients:
  externalKubernetesKubeConfig: ""
  openshiftLoopbackKubeConfig: openshift-master.kubeconfig
masterPublicURL: https://192.168.1.181:8443
networkConfig:
  clusterNetworkCIDR: 10.128.0.0/14
  externalIPNetworkCIDRs: null
  hostSubnetLength: 9
  networkPluginName: ""
  serviceNetworkCIDR: 172.30.0.0/16
oauthConfig:
  alwaysShowProviderSelection: false
  assetPublicURL: https://192.168.1.181:8443/console/
  grantConfig:
    method: auto
  identityProviders:
  - challenge: true
    login: true
    mappingMethod: claim
    name: anypassword
    provider:
      apiVersion: v1
      kind: AllowAllPasswordIdentityProvider
  masterCA: ca-bundle.crt
  masterPublicURL: https://192.168.1.181:8443
  masterURL: https://192.168.1.181:8443
  sessionConfig:
    sessionMaxAgeSeconds: 300
    sessionName: ssn
    sessionSecretsFile: ""
  templates: null
  tokenConfig:
    accessTokenMaxAgeSeconds: 86400
    authorizeTokenMaxAgeSeconds: 300
pauseControllers: false
policyConfig:
  bootstrapPolicyFile: policy.json
  openshiftInfrastructureNamespace: openshift-infra
  openshiftSharedResourcesNamespace: openshift
  userAgentMatchingConfig:
    defaultRejectionMessage: ""
    deniedClients: null
    requiredClients: null
projectConfig:
  defaultNodeSelector: ""
  projectRequestMessage: ""
  projectRequestTemplate: ""
  securityAllocator:
    mcsAllocatorRange: s0:/2
    mcsLabelsPerProject: 5
    uidAllocatorRange: 1000000000-1999999999/10000
routingConfig:
  subdomain: router.default.svc.cluster.local
serviceAccountConfig:
  limitSecretReferences: false
  managedNames:
  - default
  - builder
  - deployer
  masterCA: ca-bundle.crt
  privateKeyFile: serviceaccounts.private.key
  publicKeyFiles:
  - serviceaccounts.public.key
servingInfo:
  bindAddress: 0.0.0.0:8443
  bindNetwork: tcp4
  certFile: master.server.crt
  clientCA: ca.crt
  keyFile: master.server.key
  maxRequestsInFlight: 500
  namedCertificates: null
  requestTimeoutSeconds: 3600
volumeConfig:
  dynamicProvisioningEnabled: true
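A quick way to confirm the file parses as well-formed YAML (a generic check, assuming Python with PyYAML is available on the master):

# Parse the master config; prints OK only if the YAML is well-formed
python -c 'import yaml; yaml.safe_load(open("openshift.local.config/master/master-config.yaml")); print("OK")'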
This file was generated with the following command:
openshift start master --write-config=openshift.local.config/master
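If you suspect the file has drifted from what OpenShift would generate today, one approach is to write a fresh copy to a scratch directory and diff the two; /tmp/freshconfig below is just an arbitrary path:

# Write a pristine config elsewhere (leaves the running config untouched)
openshift start master --write-config=/tmp/freshconfig
# Compare it with the config currently in use
diff /tmp/freshconfig/master-config.yaml openshift.local.config/master/master-config.yaml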
Can anyone help me, please? Thanks in advance.
Your authentication session token has expired; you need to run oc login again. The expiration time is controlled by the oauthConfig.tokenConfig.accessTokenMaxAgeSeconds field of your master config file.
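A minimal recovery sketch, assuming the master at https://192.168.1.181:8443 is reachable and the anypassword provider from the config above is still active (AllowAllPasswordIdentityProvider accepts any username/password pair, so admin/anything below are just placeholders):

# Obtain a fresh access token
oc login https://192.168.1.181:8443 -u admin -p anything
# Confirm the session works and inspect the new token
oc whoami
oc whoami -t
# To make tokens live longer, raise accessTokenMaxAgeSeconds under
# oauthConfig.tokenConfig in master-config.yaml (86400 = 24 hours)
# and restart the master.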