Installing MidoNet on CentOS (OpenStack Kilo)

Management Node

Base Environment

yum install -y yum-plugin-priorities
# EPEL
yum install -y http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-9.noarch.rpm
# RDO Kilo, method 1
yum install https://repos.fedorapeople.org/repos/openstack/EOL/openstack-kilo/rdo-release-kilo-2.noarch.rpm -y
# RDO Kilo, method 2: create /etc/yum.repos.d/rdo-release.repo with the following content
[openstack-kilo]
name=OpenStack Kilo Repository
baseurl=http://mirrors.cloud.aliyuncs.com/centos/$releasever/cloud/$basearch/openstack-kilo/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-Cloud
# Note: if /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-Cloud does not exist, run method 1 first to obtain the key, then apply method 2
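A minimal sketch of that check, using the same key path and the method 1 RPM from above:
# install the rdo-release package only if the SIG-Cloud key is missing
if [ ! -f /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-Cloud ]; then
    yum install -y https://repos.fedorapeople.org/repos/openstack/EOL/openstack-kilo/rdo-release-kilo-2.noarch.rpm
fi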

# DataStax (e.g. /etc/yum.repos.d/datastax.repo)
[datastax]
name = DataStax Repo for Apache Cassandra
baseurl = http://rpm.datastax.com/community
enabled = 1
gpgcheck = 1
gpgkey = https://rpm.datastax.com/rpm/repo_key
# MidoNet (e.g. /etc/yum.repos.d/midonet.repo)
[midonet]
name=MidoNet
baseurl=http://repo.midonet.org/midonet/v2015.06/RHEL/7/stable/
enabled=1
gpgcheck=1
gpgkey=http://repo.midonet.org/RPM-GPG-KEY-midokura

[midonet-openstack-integration]
name=MidoNet OpenStack Integration
baseurl=http://repo.midonet.org/openstack-kilo/RHEL/7/stable/
enabled=1
gpgcheck=1
gpgkey=http://repo.midonet.org/RPM-GPG-KEY-midokura

[midonet-misc]
name=MidoNet 3rd Party Tools and Libraries
baseurl=http://repo.midonet.org/misc/RHEL/7/misc/
enabled=1
gpgcheck=1
gpgkey=http://repo.midonet.org/RPM-GPG-KEY-midokura

# Update and reboot
yum upgrade
reboot

# Install
yum install -y openstack-selinux
# Check /etc/selinux/config; it should contain:
SELINUX=permissive
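Permissive mode can also be applied immediately, without a reboot, using the standard SELinux tools:
setenforce 0
getenforce   # should print Permissive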

The related repo files can also be downloaded from https://dl.ysicing.net/openstack

Install the Database

yum install -y mariadb mariadb-server MySQL-python
systemctl enable mariadb.service
systemctl start mariadb.service
mysql_secure_installation
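A quick sanity check that MariaDB is up and accepts the root password set by mysql_secure_installation:
mysql -u root -p -e "SHOW DATABASES;"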

Install RabbitMQ

yum install -y rabbitmq-server
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
rabbitmqctl change_password guest 123   # pitfall: after changing the guest password, other services' config files must be updated too; skipped by default
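To confirm the broker is running before moving on:
rabbitmqctl status
rabbitmqctl list_users   # the guest user should be listed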

Install and Configure Keystone

CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'10.%.%.%' \
  IDENTIFIED BY 'keystone123';
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'10.%.%.%' \
  IDENTIFIED BY 'neutron123';
-- adjust the '10.%.%.%' host pattern so it matches the address keystone and neutron will connect from
openssl rand -hex 10
yum install -y openstack-keystone python-keystoneclient
# Edit /etc/keystone/keystone.conf
admin_token = <the value generated by openssl rand -hex 10 above>
connection = mysql://keystone:[email protected]/keystone
provider = keystone.token.providers.uuid.Provider
driver = keystone.token.persistence.backends.sql.Token
driver = keystone.contrib.revoke.backends.sql.Revoke
# Uncomment these options
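The same edits can be scripted. A minimal sketch, assuming crudini (from openstack-utils) is installed and that the options live in the [DEFAULT], [database], [token] and [revoke] sections as in the stock Kilo keystone.conf:
ADMIN_TOKEN=$(openssl rand -hex 10)
crudini --set /etc/keystone/keystone.conf DEFAULT admin_token "$ADMIN_TOKEN"
crudini --set /etc/keystone/keystone.conf database connection mysql://keystone:[email protected]/keystone
crudini --set /etc/keystone/keystone.conf token provider keystone.token.providers.uuid.Provider
crudini --set /etc/keystone/keystone.conf token driver keystone.token.persistence.backends.sql.Token
crudini --set /etc/keystone/keystone.conf revoke driver keystone.contrib.revoke.backends.sql.Revoke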
keystone-manage pki_setup --keystone-user keystone --keystone-group keystone
chown -R keystone:keystone /var/log/keystone
chown -R keystone:keystone /etc/keystone/ssl
chmod -R o-rwx /etc/keystone/ssl
# Sync the database
su -s /bin/sh -c "keystone-manage db_sync" keystone   # or simply: keystone-manage db_sync
# If errors show up later, run the sync again:
su -s /bin/sh -c "keystone-manage db_sync" keystone
systemctl enable openstack-keystone
systemctl start openstack-keystone
export OS_SERVICE_TOKEN=137fd9ef35e8f467290e   # the admin_token configured in keystone.conf
export OS_SERVICE_ENDPOINT=http://192.168.1.101:35357/v2.0
# Create the admin tenant, user, and role
# Create the admin tenant
keystone tenant-create --name admin --description "Admin Tenant"
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
| description |           Admin Tenant           |
|   enabled   |               True               |
|      id     | 14bcfde3b6ee437eadec2decf937f505 |
|     name    |              admin               |
+-------------+----------------------------------+
# Create the admin user
keystone user-create --name admin --pass admin123 --email [email protected]
+----------+----------------------------------+
| Property |              Value               |
+----------+----------------------------------+
|  email   |        [email protected]         |
| enabled  |               True               |
|    id    | 33a13a41ca9a4993af1951d02a05faef |
|   name   |              admin               |
| username |              admin               |
+----------+----------------------------------+
# Create the admin role
keystone role-create --name admin
+----------+----------------------------------+
| Property |              Value               |
+----------+----------------------------------+
|    id    | 242811046db349308564010a925b49c7 |
|   name   |              admin               |
+----------+----------------------------------+
# Assign the admin role to the admin user in the admin tenant
keystone user-role-add --user admin --tenant admin --role admin
# Create a service tenant
keystone tenant-create --name service --description "Service Tenant"
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
| description |          Service Tenant          |
|   enabled   |               True               |
|      id     | 2e4d87fe789e4d4690b6b08274de8c42 |
|     name    |             service              |
+-------------+----------------------------------+
# Create the service entity for the Identity service
keystone service-create --name keystone --type identity --description "OpenStack Identity"

+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
| description |        OpenStack Identity        |
|   enabled   |               True               |
|      id     | d058989823234a2789b34ddce20d3dee |
|     name    |             keystone             |
|     type    |             identity             |
+-------------+----------------------------------+

# Create the API endpoint
keystone endpoint-create \
  --service-id d058989823234a2789b34ddce20d3dee \
  --publicurl http://192.168.1.101:5000/v2.0 \
  --internalurl http://192.168.1.101:5000/v2.0 \
  --adminurl http://192.168.1.101:35357/v2.0 \
  --region regionOne

## The service id can also be obtained with: keystone service-list | awk '/ identity / {print $2}'
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
|   adminurl  | http://192.168.1.101:35357/v2.0 |
|      id     | af9656555b0e44d0af51907dedd81a63 |
| internalurl | http://192.168.1.101:5000/v2.0  |
|  publicurl  | http://192.168.1.101:5000/v2.0  |
|    region   |            regionOne             |
|  service_id | d058989823234a2789b34ddce20d3dee |
+-------------+----------------------------------+

# Create the MidoNet service and user
keystone service-create --name midonet --type midonet --description "MidoNet API Service"
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
| description |       MidoNet API Service        |
|   enabled   |               True               |
|      id     | b8447795800b4c49b1378389e97b2cf7 |
|     name    |             midonet              |
|     type    |             midonet              |
+-------------+----------------------------------+
keystone user-create --name midonet --pass midonet123 --tenant admin
+----------+----------------------------------+
| Property |              Value               |
+----------+----------------------------------+
|  email   |                                  |
| enabled  |               True               |
|    id    | fb2e334f531c4f98a1a2c8ecad7ebfe0 |
|   name   |             midonet              |
| tenantId | 14bcfde3b6ee437eadec2decf937f505 |
| username |             midonet              |
+----------+----------------------------------+
keystone user-role-add --user midonet --role admin --tenant admin
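To verify the assignment took effect:
keystone user-role-list --user midonet --tenant admin   # should list the admin role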

Register Neutron in Keystone

## admin-openrc.sh
cat << EOF >> /tmp/env.sh
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin123
export OS_AUTH_URL=http://192.168.1.101:35357/v2.0
EOF

unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
. /tmp/env.sh
keystone user-create --name neutron --pass neutron123
+----------+----------------------------------+
| Property |              Value               |
+----------+----------------------------------+
|  email   |                                  |
| enabled  |               True               |
|    id    | cb33a23052004bac8edf833d0146c30c |
|   name   |             neutron              |
| username |             neutron              |
+----------+----------------------------------+
keystone user-role-add --user neutron --tenant service --role admin
# Create the network service entity
keystone service-create --name neutron --type network \
  --description "OpenStack Networking"
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
| description |       OpenStack Networking       |
|   enabled   |               True               |
|      id     | 4981313387af411283f0bb629575bec1 |
|     name    |             neutron              |
|     type    |             network              |
+-------------+----------------------------------+
keystone service-list | awk '/ network / {print $2}'
4981313387af411283f0bb629575bec1

keystone endpoint-create \
  --service-id 4981313387af411283f0bb629575bec1 \
  --publicurl http://192.168.1.101:9696 \
  --adminurl http://192.168.1.101:9696 \
  --internalurl http://192.168.1.101:9696 \
  --region regionOne

+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
|   adminurl  |    http://192.168.1.101:9696    |
|      id     | fc729eec98174423ae8a9f5ec7b5f639 |
| internalurl |    http://192.168.1.101:9696    |
|  publicurl  |    http://192.168.1.101:9696    |
|    region   |            regionOne             |
|  service_id | 4981313387af411283f0bb629575bec1 |
+-------------+----------------------------------+
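A quick check that the service and endpoint are registered correctly:
keystone service-list
keystone endpoint-list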

Install and Configure Neutron

yum install openstack-neutron python-neutron-plugin-midonet -y
# /etc/neutron/neutron.conf

[DEFAULT]
core_plugin = neutron.plugins.midonet.plugin.MidonetPluginV2
service_plugins = lbaas
auth_strategy = keystone
rabbit_host=127.0.0.1
[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
[keystone_authtoken]
auth_uri = http://192.168.1.101:5000/v2.0
identity_uri = http://192.168.1.101:35357
admin_tenant_name = service
admin_user = neutron
admin_password = neutron123
[database]
connection = mysql://neutron:[email protected]/neutron
[service_providers]
service_provider=LOADBALANCER:Midonet:midonet.neutron.services.loadbalancer.driver.MidonetLoadbalancerDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
[nova]
[oslo_concurrency]
lock_path = $state_path/lock
[oslo_policy]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]


# Create /etc/neutron/plugins/midonet/midonet.ini
[DATABASE]
sql_connection = mysql://neutron:[email protected]/neutron

[MIDONET]
# MidoNet API URL
midonet_uri = http://192.168.1.101:8080/midonet-api
# MidoNet administrative user in Keystone
username = midonet
password = midonet123
# MidoNet administrative user's tenant
project_id = admin # the official docs use service here

# Link the MidoNet plugin configuration as plugin.ini
ln -s /etc/neutron/plugins/midonet/midonet.ini /etc/neutron/plugin.ini

yum install python-neutron-lbaas -y

# Add to /etc/neutron/neutron.conf
[service_providers]
service_provider = LOADBALANCER:Midonet:midonet.neutron.services.loadbalancer.driver.MidonetLoadbalancerDriver:default
service_plugins = lbaas

# Populate the Neutron database
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/midonet/midonet.ini upgrade kilo" neutron

systemctl enable neutron-server.service
systemctl start neutron-server.service
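Once neutron-server is up, a simple smoke test with the admin credentials from /tmp/env.sh (the MidoNet plugin should answer the extension list):
. /tmp/env.sh
neutron ext-list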



Install ZooKeeper

# ZooKeeper must be installed on every management host
yum install java-1.7.0-openjdk -y
yum install zookeeper zkdump nmap-ncat -y
# /etc/zookeeper/zoo.cfg
server.1=192.168.1.101:2888:3888

mkdir /var/lib/zookeeper/data
chown zookeeper:zookeeper /var/lib/zookeeper/data

echo 1 > /var/lib/zookeeper/data/myid
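This guide runs a single management host. For a three-node management cluster (hypothetical additional hosts 192.168.1.102/103), every host would list all members in zoo.cfg and write its own index to myid, for example:
# /etc/zookeeper/zoo.cfg on every host
server.1=192.168.1.101:2888:3888
server.2=192.168.1.102:2888:3888
server.3=192.168.1.103:2888:3888
# on the second host only
echo 2 > /var/lib/zookeeper/data/myid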

# the zookeeper init script expects the java binary under /usr/java/default
mkdir -p /usr/java/default/bin/
ln -s /usr/lib/jvm/jre-1.7.0-openjdk/bin/java /usr/java/default/bin/java

systemctl enable zookeeper.service
systemctl start zookeeper.service

# Verify
echo ruok | nc 192.168.1.101 2181
imok

echo stat | nc 192.168.1.101 2181
Zookeeper version: 3.4.5--1, built on 02/08/2013 12:25 GMT
Clients:
 /192.168.1.101:55560[0](queued=0,recved=1,sent=0)

Latency min/avg/max: 0/0/0
Received: 2
Sent: 1
Connections: 1
Outstanding: 0
Zxid: 0x0
Mode: standalone
Node count: 4

Install Cassandra

yum install dsc20 -y
# /etc/cassandra/conf/cassandra.yaml
cluster_name: 'midonet'
      - seeds: "192.168.1.101"

# /etc/cassandra/conf/cassandra.yaml
listen_address: 192.168.1.101
rpc_address: 192.168.1.101
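Likewise, for a multi-node Cassandra cluster (hypothetical additional hosts), every node shares the same seeds list while listen_address and rpc_address stay node-local:
# /etc/cassandra/conf/cassandra.yaml
      - seeds: "192.168.1.101,192.168.1.102,192.168.1.103"
listen_address: 192.168.1.102   # on the second node
rpc_address: 192.168.1.102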

# /etc/init.d/cassandra -- add the mkdir/chown lines below inside the start) block so /var/run/cassandra exists before Cassandra starts

case "$1" in
    start)
        # Cassandra startup
        echo -n "Starting Cassandra: "
        mkdir -p /var/run/cassandra
        chown cassandra:cassandra /var/run/cassandra
        su $CASSANDRA_OWNR -c "$CASSANDRA_PROG -p $pid_file" > $log_file 2>&1
        retval=$?

systemctl enable cassandra.service
systemctl start cassandra.service

# Verify
nodetool -host 192.168.1.101 status

Datacenter: datacenter1
=======================
Status=Up/Down
|/ State=Normal/Leaving/Joining/Moving
--  Address         Load       Tokens  Owns (effective)  Host ID                               Rack
UN  192.168.1.101  46.3 KB    256     100.0%            91e42522-58f7-4ddd-bf25-e75ffd0cec32

midonet-api

yum install midonet-api -y
# Edit /usr/share/midonet-api/WEB-INF/web.xml
<context-param>
    <param-name>rest_api-base_uri</param-name>
    <param-value>http://192.168.1.101:8080/midonet-api</param-value>
</context-param>
<context-param>
    <param-name>keystone-service_host</param-name>
    <param-value>192.168.1.101</param-value>
</context-param>
<context-param>
    <param-name>keystone-admin_token</param-name>
    <param-value>04a74c54c7cbed1bd7c9</param-value>
</context-param>
<context-param>
    <param-name>zookeeper-zookeeper_hosts</param-name>
    <param-value>192.168.1.101:2181</param-value>
</context-param>
<context-param>
    <param-name>midocluster-properties_file</param-name>
    <param-value>/var/lib/tomcat/webapps/host_uuid.properties</param-value>
</context-param>

Install Tomcat

yum install tomcat -y
# /etc/tomcat/server.xml
<Connector port="8080" protocol="HTTP/1.1"
           connectionTimeout="20000"
           URIEncoding="UTF-8"
           redirectPort="8443"
           maxHttpHeaderSize="65536" />

# Create /etc/tomcat/Catalina/localhost/midonet-api.xml
<Context
    path="/midonet-api"
    docBase="/usr/share/midonet-api"
    antiResourceLocking="false"
    privileged="true"
/>

systemctl enable tomcat.service
systemctl start tomcat.service
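A simple smoke test that Tomcat is serving the MidoNet API (the exact response body varies by version):
curl -s http://192.168.1.101:8080/midonet-api/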

midonet-cli

yum install python-midonetclient -y
# Create ~/.midonetrc with the following content
[cli]
api_url = http://192.168.1.101:8080/midonet-api
username = admin
password = admin123
project_id = admin

Install Midolman

yum install midolman -y
# /etc/midolman/midolman.conf
[zookeeper]
zookeeper_hosts = 192.168.1.101:2181

cat << EOF | mn-conf set -t default
zookeeper {
    zookeeper_hosts = "192.168.1.101:2181"
}

cassandra {
    servers = "192.168.1.101"
}
EOF

echo "cassandra.replication_factor : 3" | mn-conf set -t default

# Resulting /etc/midolman/midolman.conf
[zookeeper]
zookeeper_hosts = 192.168.1.101:2181
root_key = /midonet/v1
session_timeout = 30000
session_gracetime = 30000

[cassandra]
servers = 192.168.1.101
replication_factor = 3
cluster = midonet

[midolman]
bgpd_binary = /usr/lib/quagga/

## Configuration template
mn-conf template-set -h local -t agent-compute-medium
mv /etc/midolman/midolman-env.sh /etc/midolman/midolman-env.sh.bak
cp /etc/midolman/midolman-env.sh.compute.medium /etc/midolman/midolman-env.sh

systemctl enable midolman.service
systemctl start midolman.service

Register the Host in MidoNet

midonet-cli
# If the connection is refused, restart tomcat
tunnel-zone create name tz type gre
# List tunnel zones
tunnel-zone list
# List hosts
host list

Initialization

neutron net-create ext1 --router:external

Created a new network:
+-----------------+--------------------------------------+
| Field           | Value                                |
+-----------------+--------------------------------------+
| admin_state_up  | True                                 |
| id              | 566c6ac9-607c-46e1-b680-7bbf642760dd |
| name            | ext1                                 |
| router:external | True                                 |
| shared          | False                                |
| status          | ACTIVE                               |
| subnets         |                                      |
| tenant_id       | bc382097a9454ecb97a273f0c0f18f2f     |
+-----------------+--------------------------------------+
# If it complains that a tenant_id must be specified, midonet.ini is most likely misconfigured
# Get the provider_router_id
midonet-cli -e router list | grep "MidoNet Provider Router" | awk '{ print $2 }'

Network Setup on the GRE Tunnel Gateway Host

# Create the external uplink bridge
ip link add type veth
ip link set dev veth0 up
ip link set dev veth1 up
brctl addbr uplinkbridge
brctl addif uplinkbridge veth0
ip addr add 172.31.0.1/30 dev uplinkbridge
ip link set dev uplinkbridge up
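To confirm the bridge and veth pair are wired up:
brctl show uplinkbridge
ip addr show dev uplinkbridge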

# Add routes from the outside toward the tenant networks
ip route add 172.16.0.0/14 via 172.31.0.2
ip route add 172.20.0.0/14 via 172.31.0.2
ip route add 172.24.0.0/14 via 172.31.0.2

# Add routes in MidoNet (run inside midonet-cli)

tunnel-zone tzone0 add member host host0 address 192.168.1.101
router list name 'MidoNet Provider Router'
router router0 add port address 172.31.0.2 net 172.31.0.0/30
router router0 add route src 0.0.0.0/0 dst 0.0.0.0/0 type normal port router router0 port port0 gw 172.31.0.1
host host0 add binding port router router0 port port0 interface veth1

# NAT (source address translation) for outbound traffic
iptables -t nat -I POSTROUTING -o eth0 -s 172.16.0.0/14 -j MASQUERADE
iptables -I FORWARD -s 172.16.0.0/14 -j ACCEPT

iptables -t nat -I POSTROUTING -o eth0 -s 172.20.0.0/14 -j MASQUERADE
iptables -I FORWARD -s 172.20.0.0/14 -j ACCEPT

iptables -t nat -I POSTROUTING -o eth0 -s 172.24.0.0/14 -j MASQUERADE
iptables -I FORWARD -s 172.24.0.0/14 -j ACCEPT

# Start at boot: add the following line to /etc/rc.local
sh /root/bin/init_midonet_gw.sh
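On CentOS 7, /etc/rc.d/rc.local is not executable by default, so the line above will not run at boot unless the file is made executable:
chmod +x /etc/rc.d/rc.local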
# Contents of init_midonet_gw.sh:
ip link add type veth
ip link set dev veth0 up
ip link set dev veth1 up
brctl addbr uplinkbridge
brctl addif uplinkbridge veth0
ip addr add 172.31.0.1/30 dev uplinkbridge
ip link set dev uplinkbridge up

ip route add 172.16.0.0/14 via 172.31.0.2
ip route add 172.20.0.0/14 via 172.31.0.2
ip route add 172.24.0.0/14 via 172.31.0.2

iptables -t nat -I POSTROUTING -o eth0 -s 172.16.0.0/14 -j MASQUERADE
iptables -I FORWARD -s 172.16.0.0/14 -j ACCEPT

iptables -t nat -I POSTROUTING -o eth0 -s 172.20.0.0/14 -j MASQUERADE
iptables -I FORWARD -s 172.20.0.0/14 -j ACCEPT

iptables -t nat -I POSTROUTING -o eth0 -s 172.24.0.0/14 -j MASQUERADE
iptables -I FORWARD -s 172.24.0.0/14 -j ACCEPT

Compute Node

# Add the same repositories as on the management node

# Install midolman on the compute node
yum install -y midolman

# Configure /etc/midolman/midolman.conf on the compute node; list every management host in the same way

[zookeeper]
zookeeper_hosts = 192.168.1.101:2181
session_timeout = 30000
root_key = /midonet/v1
session_gracetime = 30000

[cassandra]
servers = 192.168.1.101
replication_factor = 3
cluster = midonet

[midolman]
bgpd_binary = /usr/lib/quagga/

# Start midolman
systemctl enable midolman.service
systemctl start midolman.service

# Symlink the host UUID file
ln -s /etc/midonet_host_id.properties /etc/midolman/host_uuid.properties

# Switch to the compute configuration template
mv /etc/midolman/midolman-env.sh /etc/midolman/midolman-env.sh.bak
cp /etc/midolman/midolman-env.sh.compute.medium /etc/midolman/midolman-env.sh
mn-conf template-set -t agent-compute-medium -h <host-uuid>
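To fill in <host-uuid>, the agent's UUID can be read from the properties file linked above (assuming the usual host_uuid=<uuid> format):
HOST_UUID=$(sed -n 's/^host_uuid=//p' /etc/midolman/host_uuid.properties)
mn-conf template-set -t agent-compute-medium -h "$HOST_UUID"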

# Check that the template is in effect
mn-conf template-list

# Restart midolman
systemctl restart midolman.service

Add the Compute Node from midonet-cli on the Management Node

# List hosts
midonet> host list
midonet> tunnel-zone tzone0 list member
midonet> tunnel-zone tzone0 add member host host11 address 192.168.1.100
# Confirm that the member was added correctly
midonet> tunnel-zone tzone0 list member
*****
Written by ysicing on 11 July 2017