1. Abstract
The OpenStack system consists of several key services that are installed separately. These services work together depending on your cloud needs, and include Compute, Identity, Networking, Image, Block Storage, Object Storage, Telemetry, Orchestration, and Database services. You can install any of these projects separately and configure them standalone or as connected entities.
2. Getting Started
The OpenStack project is an open source cloud computing platform for all types of clouds, designed to be simple to implement, massively scalable, and feature rich. Developers and cloud computing technologists from around the world create the OpenStack project.
3. Overview
4. Pre-installation Lab Environment Setup
See the official documentation for the network configuration.
Setup plan
Following the first network architecture in the official documentation, build an OpenStack development environment for learning. The IP configuration is the same as the one shown on that page. The test environment has two nodes: a controller node and a compute1 node.
Environment
VMware virtual machines
CentOS 7 Minimal
More than 8 GB of RAM
==Some of the values below set to tian are passwords; replace them according to your environment.==
Preparation (controller and compute nodes)
Install the CentOS system; during installation, add two network interfaces to the controller node and to the compute node.
1. Disable the firewall (firewalld) and SELinux
systemctl stop firewalld.service # stop firewalld
systemctl disable firewalld.service # keep firewalld from starting at boot
firewall-cmd --state # check the firewall state (shows "not running" when stopped, "running" when active)
[root@dev-server ~] # getenforce # verify that SELinux is disabled
Disabled
[root@dev-server ~] # /usr/sbin/sestatus -v
SELinux status: disabled
vi /etc/selinux/config # change SELINUX=enforcing to SELINUX=disabled; a reboot is required for the change to take effect
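If you prefer not to edit the file by hand, a one-line alternative (assuming the stock SELINUX=enforcing line) is:
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config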
2. Set the time zone to Shanghai
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime # overwrites the existing file
3. Set the hostname: vi /etc/hostname and enter the hostname
The hostnames are controller and compute1, respectively (this two-node setup does not use a separate network node).
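On CentOS 7 you can also set the hostname without editing the file, for example:
hostnamectl set-hostname controller # run on the controller node
hostnamectl set-hostname compute1 # run on the compute node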
4. Configure the IP addresses and network settings (see the linked page for NIC details)
controller: 2 NICs
compute1: 2 NICs
Configure the unnumbered (provider) interface as follows:
vim /etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME
DEVICE=INTERFACE_NAME
TYPE=Ethernet
ONBOOT="yes"
BOOTPROTO="none"
Configure the static (management) IP as follows (controller shown; use 10.0.0.31 on compute1):
vim /etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME
DEVICE=INTERFACE_NAME
TYPE=Ethernet
ONBOOT="yes"
BOOTPROTO="static"
IPADDR=10.0.0.11
GATEWAY=10.0.0.2
NETMASK=255.255.255.0
DNS1=114.114.114.114
vim /etc/hosts
# controller
10.0.0.11 controller
# compute1
10.0.0.31 compute1
5. Install the time service (chrony)
yum install chrony -y
vim /etc/chrony.conf
server controller iburst # on compute nodes: sync from the controller (remove the default server entries)
server ntp1.aliyun.com iburst # on the controller: use a domestic NTP server (remove the default server entries)
allow 10.0.0.0/24 # on the controller: allow hosts on the management network to sync from it
systemctl enable chronyd.service # enable at boot
systemctl start chronyd.service
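To confirm that chrony is synchronizing (the exact output varies by node and upstream server):
chronyc sources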
Verify that the preparation succeeded
On the compute node:
ping controller
ping www.baidu.com
On the controller node:
ping compute1
ping www.baidu.com
6. Install the OpenStack packages (Pike)
yum install centos-release-openstack-pike
yum upgrade
yum install python-openstackclient
yum install openstack-selinux
7. Install the database (controller node only)
yum install mariadb mariadb-server python2-PyMySQL
vim /etc/my.cnf.d/openstack.cnf # this file needs to be created
[mysqld]
bind-address = 0.0.0.0
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
systemctl enable mariadb.service # enable at boot
systemctl start mariadb.service
mysql_secure_installation # run the security setup script
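A quick check that the database is up and the new root password works:
mysql -u root -p -e "SHOW DATABASES;"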
8. Install the message queue
Install rabbitmq (listens on port 5672) and add the openstack user:
yum install rabbitmq-server
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
rabbitmqctl add_user openstack tian
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
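To confirm the user and its permissions were created:
rabbitmqctl list_users
rabbitmqctl list_permissions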
9. Install the cache (memcached)
yum install memcached python-memcached
vim /etc/sysconfig/memcached
OPTIONS="-l 127.0.0.1,::1,controller"
systemctl enable memcached.service
systemctl start memcached.service
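A quick sanity check that memcached is listening (memcached-tool ships with the memcached package):
memcached-tool controller:11211 stats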
10. Installing etcd does not seem to matter for a test environment, so I did not install it.
Install and configure the Identity service, keystone (install this service first; controller node; create a snapshot before starting)
1. Configure the database for keystone
mysql -u root -p
Create the keystone database:
CREATE DATABASE keystone;
Grant proper access to the keystone database:
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'tian';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'tian';
2. Install keystone
The keystone service listens on ports 5000 and 35357.
Install the keystone packages:
yum install openstack-keystone httpd mod_wsgi
3. Edit the keystone configuration file keystone.conf
vim /etc/keystone/keystone.conf
In the [database] section, configure database access:
[database]
connection = mysql+pymysql://keystone:tian@controller/keystone
In the [token] section, configure the Fernet token provider:
[token]
provider = fernet
Populate the Identity service database:
su -s /bin/sh -c "keystone-manage db_sync" keystone
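To confirm the sync created the keystone tables (replace tian with your database password):
mysql -u keystone -ptian -e 'USE keystone; SHOW TABLES;'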
Initialize the Fernet key repositories:
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
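The two commands above create key repositories owned by the keystone user; you can verify that they exist:
ls -l /etc/keystone/fernet-keys/ /etc/keystone/credential-keys/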
Bootstrap the Identity service (pitfall 3: older and newer releases use different port numbers) (change the password below as needed):
keystone-manage bootstrap --bootstrap-password tian \
--bootstrap-admin-url http://controller:35357/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne
4. Configure the Apache HTTP server
vim /etc/httpd/conf/httpd.conf # set ServerName to the controller node
Add:
ServerName controller
Create a link to the /usr/share/keystone/wsgi-keystone.conf file:
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
Start the HTTP service and enable it at boot:
systemctl enable httpd.service
systemctl start httpd.service
5. Set environment variables for the admin account:
export OS_USERNAME=admin
export OS_PASSWORD=tian # change to your password
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
6. Create a domain, projects, users, and roles.
openstack project create --domain default \
--description "Service Project" service
openstack project create --domain default \
--description "Demo Project" demo
openstack user create --domain default \
--password-prompt demo
openstack role create user
openstack role add --project demo --user demo user
7. Verify the installation (compare the output with the samples in the official documentation; similar output means the installation succeeded):
unset OS_AUTH_URL OS_PASSWORD
openstack --os-auth-url http://controller:35357/v3 \
--os-project-domain-name Default --os-user-domain-name Default \
--os-project-name admin --os-username admin token issue
openstack --os-auth-url http://controller:5000/v3 \
--os-project-domain-name Default --os-user-domain-name Default \
--os-project-name demo --os-username demo token issue
8. Create OpenStack client environment scripts.
vim admin-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=tian
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
vim demo-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=tian
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
. admin-openrc
openstack token issue
Install the Image service, glance (controller node; create a snapshot before starting)
1. Install and configure
- Create the database, service credentials, and API endpoints
mysql -u root -p
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
IDENTIFIED BY 'tian';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
IDENTIFIED BY 'tian';
. admin-openrc
- Create the service credentials:
openstack user create --domain default --password-prompt glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
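You can confirm that the three image endpoints were created:
openstack endpoint list
The list should include public, internal, and admin endpoints for the image service.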
- Install and configure the components:
yum install openstack-glance
Edit the /etc/glance/glance-api.conf file and complete the following actions.
vim /etc/glance/glance-api.conf
In the [database] section, configure database access:
[database]
# ...
connection = mysql+pymysql://glance:tian@controller/glance
In the [keystone_authtoken] and [paste_deploy] sections, configure Identity service access:
[keystone_authtoken]
# ...
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = tian
[paste_deploy]
# ...
flavor = keystone
In the [glance_store] section, configure the local file system store and the location of image files:
[glance_store]
# ...
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
vim /etc/glance/glance-registry.conf
In the [database] section, configure database access:
[database]
# ...
connection = mysql+pymysql://glance:tian@controller/glance
In the [keystone_authtoken] and [paste_deploy] sections, configure Identity service access:
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = tian
[paste_deploy]
flavor = keystone
Populate the Image service database:
su -s /bin/sh -c "glance-manage db_sync" glance
Start the Image services and enable them at boot:
systemctl enable openstack-glance-api.service \
openstack-glance-registry.service
systemctl start openstack-glance-api.service \
openstack-glance-registry.service
2. Verify the installation
. admin-openrc
Download the source image:
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
Upload the image to the Image service using the QCOW2 disk format and bare container format, and make it publicly visible so that all projects can access it:
openstack image create "cirros" \
--file cirros-0.3.5-x86_64-disk.img \
--disk-format qcow2 --container-format bare \
--public
Confirm the upload of the image and verify its attributes:
openstack image list
Install the Compute service (nova)
1. Overview
OpenStack Compute uses the OpenStack Identity service for authentication, the OpenStack Image service for disk images, and the OpenStack dashboard for the user and administrative interface. Image access is limited per project and per user; quotas are limited per project (for example, the number of instances a project may create). OpenStack Compute can scale horizontally on standard hardware, and downloads disk images to launch virtual machine instances.
The OpenStack Compute service consists of the following components:
- nova-api service: Accepts and responds to end-user compute API calls. It supports the OpenStack Compute API, the Amazon EC2 API, and a special Admin API for privileged users to perform administrative actions. It enforces some policies and initiates most orchestration activities, such as running an instance.
- nova-api-metadata service: Accepts metadata requests from instances. It is generally used in multi-host mode with nova-network installations.
- nova-compute service: A worker daemon that creates and terminates virtual machine instances through hypervisor APIs, for example XenAPI for XenServer/XCP, libvirt for KVM or QEMU, and VMwareAPI for VMware. Fundamentally, the daemon accepts actions from the queue and performs a series of system commands, such as launching a KVM instance, while updating the instance's state in the database.
- nova-scheduler service: Takes a virtual machine instance request from the queue and determines on which compute server host it should run.
- nova-conductor module: Mediates interactions between the nova-compute service and the database. It eliminates direct access to the cloud database by the nova-compute service. The nova-conductor module scales horizontally; however, do not deploy it on nodes where the nova-compute service runs.
- nova-cert module: A server daemon that serves X509 certificates for the Nova Cert service, used to generate certificates for euca-bundle-image. Only needed for the EC2 API.
- nova-network worker daemon: Similar to the nova-compute service, it accepts networking tasks from the queue and manipulates the network, performing tasks such as setting up bridging interfaces or changing iptables rules.
- nova-consoleauth daemon: Authorizes tokens for users that console proxies provide.
- nova-novncproxy daemon: Provides a proxy for accessing running instances through a VNC connection; supports browser-based novnc clients.
- nova-spicehtml5proxy daemon: Provides a proxy for accessing running instances through a SPICE connection; supports browser-based HTML5 clients.
- nova-xvpvncproxy daemon: Provides a proxy for accessing running instances through a VNC connection; supports a Java client.
Installation on the controller node
- Install and configure the controller node
Create the databases:
mysql -u root -p
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'tian';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'tian';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'tian';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'tian';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'tian';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'tian';
Create the service entities and API endpoints:
. admin-openrc
openstack user create --domain default --password-prompt nova
openstack role add --project service --user nova admin
openstack service create --name nova \
--description "OpenStack Compute" compute
openstack endpoint create --region RegionOne \
compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne \
compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne \
compute admin http://controller:8774/v2.1
openstack user create --domain default --password-prompt placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
Install and configure the components:
yum install openstack-nova-api openstack-nova-conductor \
openstack-nova-console openstack-nova-novncproxy \
openstack-nova-scheduler openstack-nova-placement-api
Edit the nova configuration file and complete the following:
vim /etc/nova/nova.conf
[api_database]
# ...
connection = mysql+pymysql://nova:tian@controller/nova_api
[database]
# ...
connection = mysql+pymysql://nova:tian@controller/nova
[DEFAULT]
# ...
transport_url = rabbit://openstack:tian@controller
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = tian
[DEFAULT]
# ...
my_ip = 10.0.0.11
[DEFAULT]
# ...
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[vnc]
enabled = true
# ...
server_listen = $my_ip
server_proxyclient_address = $my_ip
[glance]
# ...
api_servers = http://controller:9292
[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp
[placement]
# ...
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:35357/v3
username = placement
password = tian
vim /etc/httpd/conf.d/00-nova-placement-api.conf
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
systemctl restart httpd
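After restarting httpd, a quick sanity check of the Placement API; it should return a small JSON version document:
curl http://controller:8778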
Populate the databases:
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
nova-manage cell_v2 list_cells
Start the Compute services and enable them at boot:
systemctl enable openstack-nova-api.service \
openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service \
openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service
Compute node
Install and configure the components:
yum install openstack-nova-compute
Edit the nova configuration file and complete the following:
vim /etc/nova/nova.conf
[DEFAULT]
# ...
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:tian@controller
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = tian
[DEFAULT]
# ...
my_ip = 10.0.0.31
(Replace this with the IP address of the management network interface on your compute node, e.g. 10.0.0.31 for the first compute node in the example architecture.)
[DEFAULT]
...
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[vnc]
# ...
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[glance]
...
api_servers = http://controller:9292
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:35357/v3
username = placement
password = tian
[oslo_concurrency]
...
lock_path = /var/lib/nova/tmp
egrep -c '(vmx|svm)' /proc/cpuinfo
(If the command above returns 0, your compute node does not support hardware acceleration and you must add the option below so that libvirt uses QEMU instead of KVM; any other value means hardware acceleration is supported and this step can be skipped.)
vim /etc/nova/nova-compute.conf
[libvirt]
...
virt_type = qemu
Start the services and enable them at boot:
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
Controller node
Add the compute node to the cell database (run on the controller node):
. admin-openrc
openstack compute service list --service nova-compute
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
When you add new compute nodes, you must run nova-manage cell_v2 discover_hosts on the controller node to register them. Alternatively, you can set an appropriate discovery interval in /etc/nova/nova.conf:
[scheduler]
discover_hosts_in_cells_interval = 300
Verify operation
. admin-openrc
openstack compute service list
openstack catalog list
nova-status upgrade check
Install the Networking service (neutron)
Controller node
mysql -u root -p
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
IDENTIFIED BY 'tian';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
IDENTIFIED BY 'tian';
. admin-openrc
openstack user create --domain default --password-prompt neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron \
--description "OpenStack Networking" network
openstack endpoint create --region RegionOne \
network public http://controller:9696
openstack endpoint create --region RegionOne \
network internal http://controller:9696
openstack endpoint create --region RegionOne \
network admin http://controller:9696
Configure the networking option (this guide uses networking option 1, provider networks; see the official documentation)
Install the packages:
yum install openstack-neutron openstack-neutron-ml2 \
openstack-neutron-linuxbridge ebtables
Edit the neutron configuration file and complete the following:
vim /etc/neutron/neutron.conf
[database]
connection = mysql+pymysql://neutron:tian@controller/neutron
[DEFAULT]
# ...
core_plugin = ml2
service_plugins =
transport_url = rabbit://openstack:tian@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[keystone_authtoken]
# ...
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = tian
[nova]
# ...
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = tian
[oslo_concurrency]
# ...
lock_path = /var/lib/neutron/tmp
Configure the Modular Layer 2 (ML2) plug-in:
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
# ...
type_drivers = flat,vlan
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
# ...
flat_networks = provider
[securitygroup]
# ...
enable_ipset = true
Configure the Linux bridge agent:
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens33 # replace ens33 with the name of your provider network interface
[vxlan]
enable_vxlan = false
[securitygroup]
# ...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
Configure the DHCP agent:
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
# ...
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
Continue on the controller node. Configure the metadata agent:
vim /etc/neutron/metadata_agent.ini
[DEFAULT]
# ...
nova_metadata_host = controller
metadata_proxy_shared_secret = tian
Configure the Compute service to use the Networking service:
vim /etc/nova/nova.conf
[neutron]
# ...
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = tian
service_metadata_proxy = true
metadata_proxy_shared_secret = tian
Create a symbolic link so that neutron-server can find the ML2 plug-in configuration:
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
Populate the database:
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
Restart the Compute API service:
systemctl restart openstack-nova-api.service
Start the Networking services and enable them at boot:
systemctl enable neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
systemctl start neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
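Once the services are up, you can list the agents that registered; expect to see the Linux bridge, DHCP, and metadata agents:
. admin-openrc
openstack network agent list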
Install the dashboard (horizon) on the controller node
yum install openstack-dashboard
vim /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "controller"
ALLOWED_HOSTS = ['*']
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
}
}
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 2,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_NEUTRON_NETWORK = {
...
'enable_router': False,
'enable_quotas': False,
'enable_distributed_router': False,
'enable_ha_router': False,
'enable_lb': False,
'enable_firewall': False,
'enable_vpn': False,
'enable_fip_topology_check': False,
}
TIME_ZONE = "Asia/Shanghai"
systemctl restart httpd.service memcached.service
Verify the installation
Browse to http://controller/dashboard
Log in with the admin user and the Default domain; the password is the one you set during configuration.