[Ryu-devel] [PATCH 1/1] Move ryu/tests/integrated/common to ryu/lib/docker
fumihiko kakuma
2017-07-10 03:51:26 UTC
This patch requires the v2 patch of Iwase-San.

Signed-off-by: Fumihiko Kakuma <***@valinux.co.jp>
---
.travis.yml | 2 +-
ryu/lib/docker/__init__.py | 0
ryu/lib/docker/docker_base.py | 801 +++++++++++++++++++++
ryu/lib/docker/install_docker_test_pkg.sh | 43 ++
ryu/lib/docker/install_docker_test_pkg_common.sh | 39 +
.../docker/install_docker_test_pkg_for_travis.sh | 12 +
ryu/lib/docker/quagga.py | 332 +++++++++
ryu/lib/docker/ryubgp.py | 212 ++++++
ryu/tests/integrated/common/__init__.py | 0
ryu/tests/integrated/common/docker_base.py | 801 ---------------------
.../integrated/common/install_docker_test_pkg.sh | 43 --
.../common/install_docker_test_pkg_common.sh | 39 -
.../common/install_docker_test_pkg_for_travis.sh | 12 -
ryu/tests/integrated/common/quagga.py | 332 ---------
ryu/tests/integrated/common/ryubgp.py | 212 ------
tests/integrated/bgp/base.py | 6 +-
tests/integrated/bgp/base_ip6.py | 6 +-
tests/integrated/bgp/test_basic.py | 2 +-
tests/integrated/bgp/test_ip6_basic.py | 2 +-
19 files changed, 1448 insertions(+), 1448 deletions(-)
create mode 100644 ryu/lib/docker/__init__.py
create mode 100644 ryu/lib/docker/docker_base.py
create mode 100644 ryu/lib/docker/install_docker_test_pkg.sh
create mode 100644 ryu/lib/docker/install_docker_test_pkg_common.sh
create mode 100644 ryu/lib/docker/install_docker_test_pkg_for_travis.sh
create mode 100644 ryu/lib/docker/quagga.py
create mode 100644 ryu/lib/docker/ryubgp.py
delete mode 100644 ryu/tests/integrated/common/__init__.py
delete mode 100644 ryu/tests/integrated/common/docker_base.py
delete mode 100644 ryu/tests/integrated/common/install_docker_test_pkg.sh
delete mode 100644 ryu/tests/integrated/common/install_docker_test_pkg_common.sh
delete mode 100644 ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
delete mode 100644 ryu/tests/integrated/common/quagga.py
delete mode 100644 ryu/tests/integrated/common/ryubgp.py
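
Note for reviewers: after this move, callers import these helpers from
ryu.lib.docker instead of ryu.tests.integrated.common. A minimal usage
sketch, assuming only the module and class names visible in the diff
below (the exact statements used in tests/integrated/bgp may differ):

    # Hypothetical sketch; requires docker and sudo, as in the
    # integrated tests.
    from ryu.lib.docker import docker_base as base
    from ryu.lib.docker import quagga, ryubgp

    img = base.DockerImage()
    quagga_image = img.create_quagga(check_exist=True)
    ryu_image = img.create_ryu(check_exist=True)
    ryu_ctn = ryubgp.RyuBGPContainer(name='r1', asn=64512,
                                     router_id='192.168.0.1',
                                     ctn_image_name=ryu_image)
    quagga_ctn = quagga.QuaggaBGPContainer(name='q1', asn=65000,
                                           router_id='192.168.0.2',
                                           ctn_image_name=quagga_image)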

diff --git a/.travis.yml b/.travis.yml
index 9e5474a..cd35aac 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,7 +16,7 @@ sudo: required # Required to enable Docker service

install:
- pip install tox coveralls
- - bash ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
+ - bash ryu/lib/docker/install_docker_test_pkg_for_travis.sh

script:
- NOSE_VERBOSE=0 tox -e $TOX_ENV
diff --git a/ryu/lib/docker/__init__.py b/ryu/lib/docker/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ryu/lib/docker/docker_base.py b/ryu/lib/docker/docker_base.py
new file mode 100644
index 0000000..1ae2cc2
--- /dev/null
+++ b/ryu/lib/docker/docker_base.py
@@ -0,0 +1,801 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/base.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import itertools
+import logging
+import os
+import subprocess
+import time
+
+import netaddr
+import six
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_TEST_PREFIX = ''
+DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
+TEST_PREFIX = DEFAULT_TEST_PREFIX
+TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
+
+BGP_FSM_IDLE = 'BGP_FSM_IDLE'
+BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
+BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
+
+BGP_ATTR_TYPE_ORIGIN = 1
+BGP_ATTR_TYPE_AS_PATH = 2
+BGP_ATTR_TYPE_NEXT_HOP = 3
+BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
+BGP_ATTR_TYPE_LOCAL_PREF = 5
+BGP_ATTR_TYPE_COMMUNITIES = 8
+BGP_ATTR_TYPE_ORIGINATOR_ID = 9
+BGP_ATTR_TYPE_CLUSTER_LIST = 10
+BGP_ATTR_TYPE_MP_REACH_NLRI = 14
+BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
+
+BRIDGE_TYPE_DOCKER = 'docker'
+BRIDGE_TYPE_BRCTL = 'brctl'
+BRIDGE_TYPE_OVS = 'ovs'
+
+
+class CommandError(Exception):
+ def __init__(self, out):
+ super(CommandError, self).__init__()
+ self.out = out
+
+
+def try_several_times(f, t=3, s=1):
+ e = RuntimeError()
+ for _ in range(t):
+ try:
+ r = f()
+ except RuntimeError as exc:
+ # Keep a reference; Python 3 drops the "as" name when the except
+ # block ends, which would break the final "raise e" below.
+ e = exc
+ time.sleep(s)
+ else:
+ return r
+ raise e
+
+
+class CmdBuffer(list):
+ def __init__(self, delim='\n'):
+ super(CmdBuffer, self).__init__()
+ self.delim = delim
+
+ def __lshift__(self, value):
+ self.append(value)
+
+ def __str__(self):
+ return self.delim.join(self)
+
+
+class CommandOut(str):
+
+ def __new__(cls, stdout, stderr, command, returncode, **kwargs):
+ stdout = stdout or ''
+ obj = super(CommandOut, cls).__new__(cls, stdout, **kwargs)
+ obj.stderr = stderr or ''
+ obj.command = command
+ obj.returncode = returncode
+ return obj
+
+
+class Command(object):
+
+ def _execute(self, cmd, capture=False, executable=None):
+ """Execute a command using subprocess.Popen()
+ :Returns:
+ - out: stdout from subprocess.Popen() as a CommandOut instance,
+ which has some extra attributes.
+ out.returncode: returncode of subprocess.Popen()
+ out.stderr: stderr from subprocess.Popen()
+ """
+ if capture:
+ p_stdout = subprocess.PIPE
+ p_stderr = subprocess.PIPE
+ else:
+ p_stdout = None
+ p_stderr = None
+ pop = subprocess.Popen(cmd, shell=True, executable=executable,
+ stdout=p_stdout,
+ stderr=p_stderr)
+ __stdout, __stderr = pop.communicate()
+ _stdout = six.text_type(__stdout, 'utf-8')
+ _stderr = six.text_type(__stderr, 'utf-8')
+ out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
+ return out
+
+ def execute(self, cmd, capture=True, try_times=1, interval=1):
+ out = None
+ for i in range(try_times):
+ out = self._execute(cmd, capture=capture)
+ LOG.info(out.command)
+ if out.returncode == 0:
+ return out
+ LOG.error("stdout: %s", out)
+ LOG.error("stderr: %s", out.stderr)
+ if i + 1 >= try_times:
+ break
+ time.sleep(interval)
+ raise CommandError(out)
+
+ def sudo(self, cmd, capture=True, try_times=1, interval=1):
+ cmd = 'sudo %s' % cmd
+ return self.execute(cmd, capture=capture,
+ try_times=try_times, interval=interval)
+
+
+class DockerImage(object):
+ def __init__(self, baseimage='ubuntu:16.04'):
+ self.baseimage = baseimage
+ self.cmd = Command()
+
+ def get_images(self):
+ out = self.cmd.sudo('sudo docker images')
+ images = []
+ for line in out.splitlines()[1:]:
+ images.append(line.split()[0])
+ return images
+
+ def exist(self, name):
+ return name in self.get_images()
+
+ def build(self, tagname, dockerfile_dir):
+ self.cmd.sudo(
+ "docker build -t {0} {1}".format(tagname, dockerfile_dir),
+ try_times=3)
+
+ def remove(self, tagname, check_exist=False):
+ if check_exist and not self.exist(tagname):
+ return tagname
+ self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
+
+ def create_quagga(self, tagname='quagga', image=None, check_exist=False):
+ if check_exist and self.exist(tagname):
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ pkges = ' '.join([
+ 'telnet',
+ 'tcpdump',
+ 'quagga',
+ ])
+ if image:
+ use_image = image
+ else:
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'RUN apt-get update'
+ c << 'RUN apt-get install -qy --no-install-recommends %s' % pkges
+ c << 'CMD /usr/lib/quagga/bgpd'
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
+ self.build(tagname, workdir)
+ return tagname
+
+ def create_ryu(self, tagname='ryu', image=None, check_exist=False):
+ if check_exist and self.exist(tagname):
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ workdir_ctn = '/root/osrg/ryu'
+ pkges = ' '.join([
+ 'tcpdump',
+ 'iproute2',
+ ])
+ if image:
+ use_image = image
+ else:
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'ADD ryu %s' % workdir_ctn
+ install = ' '.join([
+ 'RUN apt-get update',
+ '&& apt-get install -qy --no-install-recommends %s' % pkges,
+ '&& cd %s' % workdir_ctn,
+ # Note: Clean previous builds, because "python setup.py install"
+ # might fail if the current directory contains the symlink to
+ # Docker host file systems.
+ '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
+ '&& pip install -r tools/pip-requires -r tools/optional-requires',
+ '&& python setup.py install',
+ ])
+ c << install
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
+ self.cmd.execute('cp -r ../ryu %s/' % workdir)
+ self.build(tagname, workdir)
+ return tagname
+
+
+class Bridge(object):
+ def __init__(self, name, subnet='', start_ip=None, end_ip=None,
+ with_ip=True, self_ip=False,
+ fixed_ip=None, reuse=False,
+ br_type='docker'):
+ """Manage a bridge
+ :Parameters:
+ - name: bridge name
+ - subnet: network cidr to be used in this bridge
+ - start_ip: first ip address to be used in the subnet
+ - end_ip: last ip address to be used in the subnet
+ - with_ip: specify whether to assign an ip address automatically
+ - self_ip: specify whether to assign an ip address to the bridge itself
+ - fixed_ip: an ip address to be assigned to the bridge
+ - reuse: specify whether to reuse an existing bridge
+ - br_type: one of 'docker', 'brctl' or 'ovs'
+ """
+ self.cmd = Command()
+ self.name = name
+ if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
+ BRIDGE_TYPE_OVS):
+ raise Exception("argument error br_type: %s" % br_type)
+ self.br_type = br_type
+ self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
+ if TEST_PREFIX != '':
+ self.name = '{0}_{1}'.format(TEST_PREFIX, name)
+ self.with_ip = with_ip
+ if with_ip:
+ self.subnet = netaddr.IPNetwork(subnet)
+ if start_ip:
+ self.start_ip = start_ip
+ else:
+ self.start_ip = netaddr.IPAddress(self.subnet.first)
+ if end_ip:
+ self.end_ip = end_ip
+ else:
+ self.end_ip = netaddr.IPAddress(self.subnet.last)
+
+ def _ip_gen():
+ for host in netaddr.IPRange(self.start_ip, self.end_ip):
+ yield host
+ self._ip_generator = _ip_gen()
+ # throw away first network address
+ self.next_ip_address()
+
+ self.self_ip = self_ip
+ if fixed_ip:
+ self.ip_addr = fixed_ip
+ else:
+ self.ip_addr = self.next_ip_address()
+ if not reuse:
+ def f():
+ if self.br_type == BRIDGE_TYPE_DOCKER:
+ gw = "--gateway %s" % self.ip_addr.split('/')[0]
+ v6 = ''
+ if self.subnet.version == 6:
+ v6 = '--ipv6'
+ cmd = ("docker network create --driver bridge %s "
+ "%s --subnet %s %s" % (v6, gw, subnet, self.name))
+ elif self.br_type == BRIDGE_TYPE_BRCTL:
+ cmd = "ip link add {0} type bridge".format(self.name)
+ elif self.br_type == BRIDGE_TYPE_OVS:
+ cmd = "ovs-vsctl add-br {0}".format(self.name)
+ else:
+ raise ValueError('Unsupported br_type: %s' % self.br_type)
+ self.delete()
+ self.execute(cmd, sudo=True, retry=True)
+ try_several_times(f)
+ if not self.docker_nw:
+ self.execute("ip link set up dev {0}".format(self.name),
+ sudo=True, retry=True)
+
+ if not self.docker_nw and self_ip:
+ ips = self.check_br_addr(self.name)
+ for key, ip in ips.items():
+ if self.subnet.version == key:
+ self.execute(
+ "ip addr del {0} dev {1}".format(ip, self.name),
+ sudo=True, retry=True)
+ self.execute(
+ "ip addr add {0} dev {1}".format(self.ip_addr, self.name),
+ sudo=True, retry=True)
+ self.ctns = []
+
+ def get_bridges_dc(self):
+ out = self.execute('docker network ls', sudo=True, retry=True)
+ bridges = []
+ for line in out.splitlines()[1:]:
+ bridges.append(line.split()[1])
+ return bridges
+
+ def get_bridges_brctl(self):
+ out = self.execute('brctl show', retry=True)
+ bridges = []
+ for line in out.splitlines()[1:]:
+ bridges.append(line.split()[0])
+ return bridges
+
+ def get_bridges_ovs(self):
+ out = self.execute('ovs-vsctl list-br', sudo=True, retry=True)
+ return out.splitlines()
+
+ def get_bridges(self):
+ if self.br_type == BRIDGE_TYPE_DOCKER:
+ return self.get_bridges_dc()
+ elif self.br_type == BRIDGE_TYPE_BRCTL:
+ return self.get_bridges_brctl()
+ elif self.br_type == BRIDGE_TYPE_OVS:
+ return self.get_bridges_ovs()
+
+ def exist(self):
+ return self.name in self.get_bridges()
+
+ def execute(self, cmd, capture=True, sudo=False, retry=False):
+ if sudo:
+ m = self.cmd.sudo
+ else:
+ m = self.cmd.execute
+ if retry:
+ return m(cmd, capture=capture, try_times=3, interval=1)
+ else:
+ return m(cmd, capture=capture)
+
+ def check_br_addr(self, br):
+ ips = {}
+ cmd = "ip a show dev %s" % br
+ for line in self.execute(cmd, sudo=True).split('\n'):
+ if line.strip().startswith("inet "):
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ips[4] = elems[1]
+ elif line.strip().startswith("inet6 "):
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ips[6] = elems[1]
+ return ips
+
+ def next_ip_address(self):
+ return "{0}/{1}".format(next(self._ip_generator),
+ self.subnet.prefixlen)
+
+ def addif(self, ctn):
+ name = ctn.next_if_name()
+ self.ctns.append(ctn)
+ ip_address = None
+ if self.docker_nw:
+ ipv4 = None
+ ipv6 = None
+ ip_address = self.next_ip_address()
+ ip_address_ip = ip_address.split('/')[0]
+ version = 4
+ if netaddr.IPNetwork(ip_address).version == 6:
+ version = 6
+ opt_ip = "--ip %s" % ip_address_ip
+ if version == 4:
+ ipv4 = ip_address
+ else:
+ opt_ip = "--ip6 %s" % ip_address_ip
+ ipv6 = ip_address
+ cmd = "docker network connect %s %s %s" % (
+ opt_ip, self.name, ctn.docker_name())
+ self.execute(cmd, sudo=True)
+ ctn.set_addr_info(bridge=self.name, ipv4=ipv4, ipv6=ipv6,
+ ifname=name)
+ else:
+ if self.with_ip:
+ ip_address = self.next_ip_address()
+ version = 4
+ if netaddr.IPNetwork(ip_address).version == 6:
+ version = 6
+ ctn.pipework(self, ip_address, name, version=version)
+ else:
+ ctn.pipework(self, '0/0', name)
+ return ip_address
+
+ def delete(self, check_exist=True):
+ if check_exist:
+ if not self.exist():
+ return
+ if self.br_type == BRIDGE_TYPE_DOCKER:
+ self.execute("docker network rm %s" % self.name,
+ sudo=True, retry=True)
+ elif self.br_type == BRIDGE_TYPE_BRCTL:
+ self.execute("ip link set down dev %s" % self.name,
+ sudo=True, retry=True)
+ self.execute(
+ "ip link delete %s type bridge" % self.name,
+ sudo=True, retry=True)
+ elif self.br_type == BRIDGE_TYPE_OVS:
+ self.execute(
+ "ovs-vsctl del-br %s" % self.name,
+ sudo=True, retry=True)
+
+
+class Container(object):
+ def __init__(self, name, image=None):
+ self.name = name
+ self.image = image
+ self.shared_volumes = []
+ self.ip_addrs = []
+ self.ip6_addrs = []
+ self.is_running = False
+ self.eths = []
+ self.id = None
+
+ self.cmd = Command()
+ self.remove()
+
+ def docker_name(self):
+ if TEST_PREFIX == DEFAULT_TEST_PREFIX:
+ return self.name
+ return '{0}_{1}'.format(TEST_PREFIX, self.name)
+
+ def get_docker_id(self):
+ if self.id:
+ return self.id
+ else:
+ return self.docker_name()
+
+ def next_if_name(self):
+ name = 'eth{0}'.format(len(self.eths) + 1)
+ self.eths.append(name)
+ return name
+
+ def set_addr_info(self, bridge, ipv4=None, ipv6=None, ifname='eth0'):
+ if ipv4:
+ self.ip_addrs.append((ifname, ipv4, bridge))
+ if ipv6:
+ self.ip6_addrs.append((ifname, ipv6, bridge))
+
+ def get_addr_info(self, bridge, ipv=4):
+ addrinfo = {}
+ if ipv == 4:
+ ip_addrs = self.ip_addrs
+ elif ipv == 6:
+ ip_addrs = self.ip6_addrs
+ else:
+ return None
+ for addr in ip_addrs:
+ if addr[2] == bridge:
+ addrinfo[addr[1]] = addr[0]
+ return addrinfo
+
+ def execute(self, cmd, capture=True, sudo=False, retry=False):
+ if sudo:
+ m = self.cmd.sudo
+ else:
+ m = self.cmd.execute
+ if retry:
+ return m(cmd, capture=capture, try_times=3, interval=1)
+ else:
+ return m(cmd, capture=capture)
+
+ def dcexec(self, cmd, capture=True, retry=False):
+ if retry:
+ return self.cmd.sudo(cmd, capture=capture, try_times=3, interval=1)
+ else:
+ return self.cmd.sudo(cmd, capture=capture)
+
+ def exec_on_ctn(self, cmd, capture=True, detach=False):
+ name = self.docker_name()
+ flag = '-d' if detach else ''
+ return self.dcexec('docker exec {0} {1} {2}'.format(
+ flag, name, cmd), capture=capture)
+
+ def get_containers(self, allctn=False):
+ cmd = 'docker ps --no-trunc=true'
+ if allctn:
+ cmd += ' --all=true'
+ out = self.dcexec(cmd, retry=True)
+ containers = []
+ for line in out.splitlines()[1:]:
+ containers.append(line.split()[-1])
+ return containers
+
+ def exist(self, allctn=False):
+ return self.docker_name() in self.get_containers(allctn=allctn)
+
+ def run(self):
+ c = CmdBuffer(' ')
+ c << "docker run --privileged=true"
+ for sv in self.shared_volumes:
+ c << "-v {0}:{1}".format(sv[0], sv[1])
+ c << "--name {0} --hostname {0} -id {1}".format(self.docker_name(),
+ self.image)
+ self.id = self.dcexec(str(c), retry=True)
+ self.is_running = True
+ self.exec_on_ctn("ip li set up dev lo")
+ ipv4 = None
+ ipv6 = None
+ for line in self.exec_on_ctn("ip a show dev eth0").split('\n'):
+ if line.strip().startswith("inet "):
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ipv4 = elems[1]
+ elif line.strip().startswith("inet6 "):
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ipv6 = elems[1]
+ self.set_addr_info(bridge='docker0', ipv4=ipv4, ipv6=ipv6,
+ ifname='eth0')
+ return 0
+
+ def stop(self, check_exist=True):
+ if check_exist:
+ if not self.exist(allctn=False):
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True)
+ self.is_running = False
+ return out
+
+ def remove(self, check_exist=True):
+ if check_exist:
+ if not self.exist(allctn=True):
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
+ self.is_running = False
+ return out
+
+ def pipework(self, bridge, ip_addr, intf_name="", version=4):
+ if not self.is_running:
+ LOG.warning('Call run() before pipeworking')
+ return
+ c = CmdBuffer(' ')
+ c << "pipework {0}".format(bridge.name)
+
+ if intf_name != "":
+ c << "-i {0}".format(intf_name)
+ else:
+ intf_name = "eth1"
+ ipv4 = None
+ ipv6 = None
+ if version == 4:
+ ipv4 = ip_addr
+ else:
+ c << '-a 6'
+ ipv6 = ip_addr
+ c << "{0} {1}".format(self.docker_name(), ip_addr)
+ self.set_addr_info(bridge=bridge.name, ipv4=ipv4, ipv6=ipv6,
+ ifname=intf_name)
+ self.execute(str(c), sudo=True, retry=True)
+
+ def get_pid(self):
+ if self.is_running:
+ cmd = "docker inspect -f '{{.State.Pid}}' %s" % self.docker_name()
+ return int(self.dcexec(cmd))
+ return -1
+
+ def start_tcpdump(self, interface=None, filename=None):
+ if not interface:
+ interface = "eth0"
+ if not filename:
+ filename = "{0}/{1}.dump".format(
+ self.shared_volumes[0][1], interface)
+ self.exec_on_ctn(
+ "tcpdump -i {0} -w {1}".format(interface, filename),
+ detach=True)
+
+
+class BGPContainer(Container):
+
+ WAIT_FOR_BOOT = 1
+ RETRY_INTERVAL = 5
+ DEFAULT_PEER_ARG = {'neigh_addr': '',
+ 'passwd': None,
+ 'vpn': False,
+ 'flowspec': False,
+ 'is_rs_client': False,
+ 'is_rr_client': False,
+ 'cluster_id': None,
+ 'policies': None,
+ 'passive': False,
+ 'local_addr': '',
+ 'as2': False,
+ 'graceful_restart': None,
+ 'local_as': None,
+ 'prefix_limit': None}
+ default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
+ DEFAULT_ROUTE_ARG = {'prefix': None,
+ 'rf': 'ipv4',
+ 'attr': None,
+ 'next-hop': None,
+ 'as-path': None,
+ 'community': None,
+ 'med': None,
+ 'local-pref': None,
+ 'extended-community': None,
+ 'matchs': None,
+ 'thens': None}
+ default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
+
+ def __init__(self, name, asn, router_id, ctn_image_name=None):
+ self.config_dir = TEST_BASE_DIR
+ if TEST_PREFIX:
+ self.config_dir = os.path.join(self.config_dir, TEST_PREFIX)
+ self.config_dir = os.path.join(self.config_dir, name)
+ self.asn = asn
+ self.router_id = router_id
+ self.peers = {}
+ self.routes = {}
+ self.policies = {}
+ super(BGPContainer, self).__init__(name, ctn_image_name)
+ self.execute(
+ 'rm -rf {0}'.format(self.config_dir), sudo=True)
+ self.execute('mkdir -p {0}'.format(self.config_dir))
+ self.execute('chmod 777 {0}'.format(self.config_dir))
+
+ def __repr__(self):
+ return str({'name': self.name, 'asn': self.asn,
+ 'router_id': self.router_id})
+
+ def run(self, wait=False, w_time=WAIT_FOR_BOOT):
+ self.create_config()
+ super(BGPContainer, self).run()
+ if wait:
+ time.sleep(w_time)
+ return w_time
+
+ def add_peer(self, peer, bridge='', reload_config=True, v6=False,
+ peer_info=None):
+ peer_info = peer_info or {}
+ self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
+ self.peers[peer].update(peer_info)
+ peer_keys = sorted(self.peers[peer].keys())
+ if peer_keys != self.default_peer_keys:
+ raise Exception("argument error peer_info: %s" % peer_info)
+
+ neigh_addr = ''
+ local_addr = ''
+ it = itertools.product(self.ip_addrs, peer.ip_addrs)
+ if v6:
+ it = itertools.product(self.ip6_addrs, peer.ip6_addrs)
+
+ for me, you in it:
+ if bridge != '' and bridge != me[2]:
+ continue
+ if me[2] == you[2]:
+ neigh_addr = you[1]
+ local_addr = me[1]
+ if v6:
+ addr, mask = local_addr.split('/')
+ local_addr = "{0}%{1}/{2}".format(addr, me[0], mask)
+ break
+
+ if neigh_addr == '':
+ raise Exception('peer {0} seems not ip reachable'.format(peer))
+
+ if not self.peers[peer]['policies']:
+ self.peers[peer]['policies'] = {}
+
+ self.peers[peer]['neigh_addr'] = neigh_addr
+ self.peers[peer]['local_addr'] = local_addr
+ if self.is_running and reload_config:
+ self.create_config()
+ self.reload_config()
+
+ def del_peer(self, peer, reload_config=True):
+ del self.peers[peer]
+ if self.is_running and reload_config:
+ self.create_config()
+ self.reload_config()
+
+ def disable_peer(self, peer):
+ raise NotImplementedError()
+
+ def enable_peer(self, peer):
+ raise NotImplementedError()
+
+ def log(self):
+ return self.execute('cat {0}/*.log'.format(self.config_dir))
+
+ def add_route(self, route, reload_config=True, route_info=None):
+ route_info = route_info or {}
+ self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
+ self.routes[route].update(route_info)
+ route_keys = sorted(self.routes[route].keys())
+ if route_keys != self.default_route_keys:
+ raise Exception("argument error route_info: %s" % route_info)
+ self.routes[route]['prefix'] = route
+ if self.is_running and reload_config:
+ self.create_config()
+ self.reload_config()
+
+ def add_policy(self, policy, peer, typ, default='accept',
+ reload_config=True):
+ self.set_default_policy(peer, typ, default)
+ self.define_policy(policy)
+ self.assign_policy(peer, policy, typ)
+ if self.is_running and reload_config:
+ self.create_config()
+ self.reload_config()
+
+ def set_default_policy(self, peer, typ, default):
+ if (typ in ['in', 'out', 'import', 'export'] and
+ default in ['reject', 'accept']):
+ if 'default-policy' not in self.peers[peer]:
+ self.peers[peer]['default-policy'] = {}
+ self.peers[peer]['default-policy'][typ] = default
+ else:
+ raise Exception('wrong type or default')
+
+ def define_policy(self, policy):
+ self.policies[policy['name']] = policy
+
+ def assign_policy(self, peer, policy, typ):
+ if peer not in self.peers:
+ raise Exception('peer {0} not found'.format(peer.name))
+ name = policy['name']
+ if name not in self.policies:
+ raise Exception('policy {0} not found'.format(name))
+ self.peers[peer]['policies'][typ] = policy
+
+ def get_local_rib(self, peer, rf):
+ raise NotImplementedError()
+
+ def get_global_rib(self, rf):
+ raise NotImplementedError()
+
+ def get_neighbor_state(self, peer_id):
+ raise NotImplementedError()
+
+ def get_reachablily(self, prefix, timeout=20):
+ version = netaddr.IPNetwork(prefix).version
+ addr = prefix.split('/')[0]
+ if version == 4:
+ ping_cmd = 'ping'
+ elif version == 6:
+ ping_cmd = 'ping6'
+ else:
+ raise Exception(
+ 'unsupported route family: {0}'.format(version))
+ cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format(
+ ping_cmd, addr)
+ interval = 1
+ count = 0
+ while True:
+ res = self.exec_on_ctn(cmd)
+ LOG.info(res)
+ if '1 packets received' in res and '0% packet loss' in res:
+ break
+ time.sleep(interval)
+ count += interval
+ if count >= timeout:
+ raise Exception('timeout')
+ return True
+
+ def wait_for(self, expected_state, peer, timeout=120):
+ interval = 1
+ count = 0
+ while True:
+ state = self.get_neighbor_state(peer)
+ LOG.info("%s's peer %s state: %s",
+ self.router_id, peer.router_id, state)
+ if state == expected_state:
+ return
+
+ time.sleep(interval)
+ count += interval
+ if count >= timeout:
+ raise Exception('timeout')
+
+ def add_static_route(self, network, next_hop):
+ cmd = '/sbin/ip route add {0} via {1}'.format(network, next_hop)
+ self.exec_on_ctn(cmd)
+
+ def set_ipv6_forward(self):
+ cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
+ self.exec_on_ctn(cmd)
+
+ def create_config(self):
+ raise NotImplementedError()
+
+ def reload_config(self):
+ raise NotImplementedError()
diff --git a/ryu/lib/docker/install_docker_test_pkg.sh b/ryu/lib/docker/install_docker_test_pkg.sh
new file mode 100644
index 0000000..a771dfc
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+function add_docker_aptline {
+ sudo apt-get update
+ if ! apt-cache search docker-engine | grep docker-engine; then
+ VER=`lsb_release -r`
+ if echo $VER | grep 12.04; then
+ REL_NAME=precise
+ elif echo $VER | grep 14.04; then
+ REL_NAME=trusty
+ elif echo $VER | grep 15.10; then
+ REL_NAME=wily
+ elif echo $VER | grep 16.04; then
+ REL_NAME=xenial
+ else
+ return 1
+ fi
+ RELEASE=ubuntu-$REL_NAME
+ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+ sudo sh -c "echo deb https://apt.dockerproject.org/repo $RELEASE main > /etc/apt/sources.list.d/docker.list"
+ fi
+}
+
+init_variables
+process_options "$@"
+
+if [ $APTLINE_DOCKER -eq 1 ]; then
+ add_docker_aptline
+fi
+
+sudo apt-get update
+if apt-cache search docker-engine | grep docker-engine; then
+ DOCKER_PKG=docker-engine
+else
+ DOCKER_PKG=docker.io
+fi
+sudo apt-get install -y $DOCKER_PKG
+install_depends_pkg
diff --git a/ryu/lib/docker/install_docker_test_pkg_common.sh b/ryu/lib/docker/install_docker_test_pkg_common.sh
new file mode 100644
index 0000000..44a3e10
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg_common.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+set -ex
+
+function init_variables {
+ APTLINE_DOCKER=0
+ DIR_BASE=/tmp
+}
+
+function process_options {
+ local max
+ local i
+ max=$#
+ i=1
+ while [ $i -le $max ]; do
+ case "$1" in
+ -a|--add-docker-aptline)
+ APTLINE_DOCKER=1
+ ;;
+ -d|--download-dir)
+ shift; ((i++))
+ DIR_BASE=$1
+ ;;
+ esac
+ shift; ((i++))
+ done
+}
+
+function install_pipework {
+ if ! which /usr/local/bin/pipework >/dev/null
+ then
+ sudo rm -rf $DIR_BASE/pipework
+ git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework
+ sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework
+ fi
+}
+
+function install_depends_pkg {
+ install_pipework
+}
diff --git a/ryu/lib/docker/install_docker_test_pkg_for_travis.sh b/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
new file mode 100644
index 0000000..d8c3b49
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+init_variables
+process_options "$@"
+
+sudo apt-get update
+install_depends_pkg
diff --git a/ryu/lib/docker/quagga.py b/ryu/lib/docker/quagga.py
new file mode 100644
index 0000000..9b6d218
--- /dev/null
+++ b/ryu/lib/docker/quagga.py
@@ -0,0 +1,332 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/quagga.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+
+import netaddr
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
+class QuaggaBGPContainer(base.BGPContainer):
+
+ WAIT_FOR_BOOT = 1
+ SHARED_VOLUME = '/etc/quagga'
+
+ def __init__(self, name, asn, router_id, ctn_image_name, zebra=False):
+ super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name)
+ self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
+ self.zebra = zebra
+ self._create_config_debian()
+
+ def run(self, wait=False, w_time=WAIT_FOR_BOOT):
+ w_time = super(QuaggaBGPContainer,
+ self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
+ return w_time
+
+ def get_global_rib(self, prefix='', rf='ipv4'):
+ rib = []
+ if prefix != '':
+ return self.get_global_rib_with_prefix(prefix, rf)
+
+ out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
+ if out.startswith('No BGP network exists'):
+ return rib
+
+ read_next = False
+
+ for line in out.split('\n'):
+ ibgp = False
+ if line[:2] == '*>':
+ line = line[2:]
+ if line[0] == 'i':
+ line = line[1:]
+ ibgp = True
+ elif not read_next:
+ continue
+
+ elems = line.split()
+
+ if len(elems) == 1:
+ read_next = True
+ prefix = elems[0]
+ continue
+ elif read_next:
+ nexthop = elems[0]
+ else:
+ prefix = elems[0]
+ nexthop = elems[1]
+ read_next = False
+
+ rib.append({'prefix': prefix, 'nexthop': nexthop,
+ 'ibgp': ibgp})
+
+ return rib
+
+ def get_global_rib_with_prefix(self, prefix, rf):
+ rib = []
+
+ lines = [line.strip() for line in self.vtysh(
+ 'show bgp {0} unicast {1}'.format(rf, prefix),
+ config=False).split('\n')]
+
+ if lines[0] == '% Network not in table':
+ return rib
+
+ lines = lines[2:]
+
+ if lines[0].startswith('Not advertised'):
+ lines.pop(0) # another useless line
+ elif lines[0].startswith('Advertised to non peer-group peers:'):
+ lines = lines[2:] # other useless lines
+ else:
+ raise Exception('unknown output format {0}'.format(lines))
+
+ if lines[0] == 'Local':
+ aspath = []
+ else:
+ aspath = [int(asn) for asn in lines[0].split()]
+
+ nexthop = lines[1].split()[0].strip()
+ info = [s.strip(',') for s in lines[2].split()]
+ attrs = []
+ if 'metric' in info:
+ med = info[info.index('metric') + 1]
+ attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
+ 'metric': int(med)})
+ if 'localpref' in info:
+ localpref = info[info.index('localpref') + 1]
+ attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
+ 'value': int(localpref)})
+
+ rib.append({'prefix': prefix, 'nexthop': nexthop,
+ 'aspath': aspath, 'attrs': attrs})
+
+ return rib
+
+ def get_neighbor_state(self, peer):
+ if peer not in self.peers:
+ raise Exception('peer {0} not found'.format(peer.router_id))
+
+ neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
+
+ info = [l.strip() for l in self.vtysh(
+ 'show bgp neighbors {0}'.format(neigh_addr),
+ config=False).split('\n')]
+
+ if not info[0].startswith('BGP neighbor is'):
+ raise Exception('unknown format')
+
+ idx1 = info[0].index('BGP neighbor is ')
+ idx2 = info[0].index(',')
+ n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
+ if n_addr == neigh_addr:
+ idx1 = info[2].index('= ')
+ state = info[2][idx1 + len('= '):]
+ if state.startswith('Idle'):
+ return base.BGP_FSM_IDLE
+ elif state.startswith('Active'):
+ return base.BGP_FSM_ACTIVE
+ elif state.startswith('Established'):
+ return base.BGP_FSM_ESTABLISHED
+ else:
+ return state
+
+ raise Exception('peer {0} not found'.format(peer.router_id))
+
+ def send_route_refresh(self):
+ self.vtysh('clear ip bgp * soft', config=False)
+
+ def create_config(self):
+ zebra = 'no'
+ self._create_config_bgp()
+ if self.zebra:
+ zebra = 'yes'
+ self._create_config_zebra()
+ self._create_config_daemons(zebra)
+
+ def _create_config_debian(self):
+ c = base.CmdBuffer()
+ c << 'vtysh_enable=yes'
+ c << 'zebra_options=" --daemon -A 127.0.0.1"'
+ c << 'bgpd_options=" --daemon -A 127.0.0.1"'
+ c << 'ospfd_options=" --daemon -A 127.0.0.1"'
+ c << 'ospf6d_options=" --daemon -A ::1"'
+ c << 'ripd_options=" --daemon -A 127.0.0.1"'
+ c << 'ripngd_options=" --daemon -A ::1"'
+ c << 'isisd_options=" --daemon -A 127.0.0.1"'
+ c << 'babeld_options=" --daemon -A 127.0.0.1"'
+ c << 'watchquagga_enable=yes'
+ c << 'watchquagga_options=(--daemon)'
+ with open('{0}/debian.conf'.format(self.config_dir), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ def _create_config_daemons(self, zebra='no'):
+ c = base.CmdBuffer()
+ c << 'zebra=%s' % zebra
+ c << 'bgpd=yes'
+ c << 'ospfd=no'
+ c << 'ospf6d=no'
+ c << 'ripd=no'
+ c << 'ripngd=no'
+ c << 'isisd=no'
+ c << 'babeld=no'
+ with open('{0}/daemons'.format(self.config_dir), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ def _create_config_bgp(self):
+
+ c = base.CmdBuffer()
+ c << 'hostname bgpd'
+ c << 'password zebra'
+ c << 'router bgp {0}'.format(self.asn)
+ c << 'bgp router-id {0}'.format(self.router_id)
+ if any(info['graceful_restart'] for info in self.peers.values()):
+ c << 'bgp graceful-restart'
+
+ version = 4
+ for peer, info in self.peers.items():
+ version = netaddr.IPNetwork(info['neigh_addr']).version
+ n_addr = info['neigh_addr'].split('/')[0]
+ if version == 6:
+ c << 'no bgp default ipv4-unicast'
+
+ c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
+ if info['is_rs_client']:
+ c << 'neighbor {0} route-server-client'.format(n_addr)
+ for typ, p in info['policies'].items():
+ c << 'neighbor {0} route-map {1} {2}'.format(n_addr, p['name'],
+ typ)
+ if info['passwd']:
+ c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
+ if info['passive']:
+ c << 'neighbor {0} passive'.format(n_addr)
+ if version == 6:
+ c << 'address-family ipv6 unicast'
+ c << 'neighbor {0} activate'.format(n_addr)
+ c << 'exit-address-family'
+
+ for route in self.routes.values():
+ if route['rf'] == 'ipv4':
+ c << 'network {0}'.format(route['prefix'])
+ elif route['rf'] == 'ipv6':
+ c << 'address-family ipv6 unicast'
+ c << 'network {0}'.format(route['prefix'])
+ c << 'exit-address-family'
+ else:
+ raise Exception(
+ 'unsupported route family: {0}'.format(route['rf']))
+
+ if self.zebra:
+ if version == 6:
+ c << 'address-family ipv6 unicast'
+ c << 'redistribute connected'
+ c << 'exit-address-family'
+ else:
+ c << 'redistribute connected'
+
+ for name, policy in self.policies.items():
+ c << 'access-list {0} {1} {2}'.format(name, policy['type'],
+ policy['match'])
+ c << 'route-map {0} permit 10'.format(name)
+ c << 'match ip address {0}'.format(name)
+ c << 'set metric {0}'.format(policy['med'])
+
+ c << 'debug bgp as4'
+ c << 'debug bgp fsm'
+ c << 'debug bgp updates'
+ c << 'debug bgp events'
+ c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
+
+ with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ def _create_config_zebra(self):
+ c = base.CmdBuffer()
+ c << 'hostname zebra'
+ c << 'password zebra'
+ c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
+ c << 'debug zebra packet'
+ c << 'debug zebra kernel'
+ c << 'debug zebra rib'
+ c << ''
+
+ with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ def vtysh(self, cmd, config=True):
+ if not isinstance(cmd, list):
+ cmd = [cmd]
+ cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
+ if config:
+ return self.exec_on_ctn(
+ "vtysh -d bgpd -c 'en' -c 'conf t' -c "
+ "'router bgp {0}' {1}".format(self.asn, cmd),
+ capture=True)
+ else:
+ return self.exec_on_ctn("vtysh -d bgpd {0}".format(cmd),
+ capture=True)
+
+ def reload_config(self):
+ daemon = []
+ daemon.append('bgpd')
+ if self.zebra:
+ daemon.append('zebra')
+ for d in daemon:
+ cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
+ self.exec_on_ctn(cmd, capture=True)
+
+
+class RawQuaggaBGPContainer(QuaggaBGPContainer):
+ def __init__(self, name, config, ctn_image_name,
+ zebra=False):
+ asn = None
+ router_id = None
+ for line in config.split('\n'):
+ line = line.strip()
+ if line.startswith('router bgp'):
+ asn = int(line[len('router bgp'):].strip())
+ if line.startswith('bgp router-id'):
+ router_id = line[len('bgp router-id'):].strip()
+ if not asn:
+ raise Exception('asn not in quagga config')
+ if not router_id:
+ raise Exception('router-id not in quagga config')
+ self.config = config
+ super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name, zebra)
+
+ def create_config(self):
+ with open(os.path.join(self.config_dir, 'bgpd.conf'), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(self.config)
+ f.writelines(self.config)
diff --git a/ryu/lib/docker/ryubgp.py b/ryu/lib/docker/ryubgp.py
new file mode 100644
index 0000000..8fe16f4
--- /dev/null
+++ b/ryu/lib/docker/ryubgp.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+import time
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
+class RyuBGPContainer(base.BGPContainer):
+
+ WAIT_FOR_BOOT = 1
+ SHARED_VOLUME = '/etc/ryu'
+
+ def __init__(self, name, asn, router_id, ctn_image_name):
+ super(RyuBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name)
+ self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
+ self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
+ self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
+ self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
+
+ def _create_config_ryu(self):
+ c = base.CmdBuffer()
+ c << '[DEFAULT]'
+ c << 'verbose=True'
+ c << 'log_file=/etc/ryu/manager.log'
+ with open(self.RYU_CONF, 'w') as f:
+ LOG.info("[%s's new config]" % self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ def _create_config_ryu_bgp(self):
+ c = base.CmdBuffer()
+ c << 'import os'
+ c << ''
+ c << 'BGP = {'
+ c << " 'local_as': %s," % str(self.asn)
+ c << " 'router_id': '%s'," % self.router_id
+ c << " 'neighbors': ["
+ c << " {"
+ for peer, info in self.peers.items():
+ n_addr = info['neigh_addr'].split('/')[0]
+ c << " 'address': '%s'," % n_addr
+ c << " 'remote_as': %s," % str(peer.asn)
+ c << " 'enable_ipv4': True,"
+ c << " 'enable_ipv6': True,"
+ c << " 'enable_vpnv4': True,"
+ c << " 'enable_vpnv6': True,"
+ c << ' },'
+ c << ' ],'
+ c << " 'routes': ["
+ for route in self.routes.values():
+ c << " {"
+ c << " 'prefix': '%s'," % route['prefix']
+ c << " },"
+ c << " ],"
+ c << "}"
+ log_conf = """LOGGING = {
+
+ # We use python logging package for logging.
+ 'version': 1,
+ 'disable_existing_loggers': False,
+
+ 'formatters': {
+ 'verbose': {
+ 'format': '%(levelname)s %(asctime)s %(module)s ' +
+ '[%(process)d %(thread)d] %(message)s'
+ },
+ 'simple': {
+ 'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
+ '%(message)s'
+ },
+ 'stats': {
+ 'format': '%(message)s'
+ },
+ },
+
+ 'handlers': {
+ # Outputs log to console.
+ 'console': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'simple'
+ },
+ 'console_stats': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'stats'
+ },
+ # Rotates log file when its size reaches 10MB.
+ 'log_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'bgpspeaker.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'verbose'
+ },
+ 'stats_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'statistics_bgps.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'stats'
+ },
+ },
+
+ # Fine-grained control of logging per instance.
+ 'loggers': {
+ 'bgpspeaker': {
+ 'handlers': ['console', 'log_file'],
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'stats': {
+ 'handlers': ['stats_file', 'console_stats'],
+ 'level': 'INFO',
+ 'propagate': False,
+ 'formatter': 'stats',
+ },
+ },
+
+ # Root loggers.
+ 'root': {
+ 'handlers': ['console', 'log_file'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ },
+}"""
+ c << log_conf
+ with open(os.path.join(self.config_dir, 'bgp_conf.py'), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ def create_config(self):
+ self._create_config_ryu()
+ self._create_config_ryu_bgp()
+
+ def is_running_ryu(self):
+ results = self.exec_on_ctn('ps ax')
+ running = False
+ for line in results.split('\n')[1:]:
+ if 'ryu-manager' in line:
+ running = True
+ return running
+
+ def start_ryubgp(self, check_running=True, retry=False):
+ if check_running:
+ if self.is_running_ryu():
+ return True
+ result = False
+ if retry:
+ try_times = 3
+ else:
+ try_times = 1
+ cmd = "ryu-manager --verbose "
+ cmd += "--config-file %s " % self.SHARED_RYU_CONF
+ cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
+ cmd += "ryu.services.protocols.bgp.application"
+ for _ in range(try_times):
+ self.exec_on_ctn(cmd, detach=True)
+ if self.is_running_ryu():
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ def stop_ryubgp(self, check_running=True, retry=False):
+ if check_running:
+ if not self.is_running_ryu():
+ return True
+ result = False
+ if retry:
+ try_times = 3
+ else:
+ try_times = 1
+ for _ in range(try_times):
+ cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
+ self.exec_on_ctn(cmd)
+ if not self.is_running_ryu():
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ def run(self, wait=False, w_time=WAIT_FOR_BOOT):
+ w_time = super(RyuBGPContainer,
+ self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
+ return w_time
+
+ def reload_config(self):
+ self.stop_ryubgp(retry=True)
+ self.start_ryubgp(retry=True)
diff --git a/ryu/tests/integrated/common/__init__.py b/ryu/tests/integrated/common/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/ryu/tests/integrated/common/docker_base.py b/ryu/tests/integrated/common/docker_base.py
deleted file mode 100644
index 1ae2cc2..0000000
--- a/ryu/tests/integrated/common/docker_base.py
+++ /dev/null
@@ -1,801 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/base.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import itertools
-import logging
-import os
-import subprocess
-import time
-
-import netaddr
-import six
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_TEST_PREFIX = ''
-DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
-TEST_PREFIX = DEFAULT_TEST_PREFIX
-TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
-
-BGP_FSM_IDLE = 'BGP_FSM_IDLE'
-BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
-BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
-
-BGP_ATTR_TYPE_ORIGIN = 1
-BGP_ATTR_TYPE_AS_PATH = 2
-BGP_ATTR_TYPE_NEXT_HOP = 3
-BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
-BGP_ATTR_TYPE_LOCAL_PREF = 5
-BGP_ATTR_TYPE_COMMUNITIES = 8
-BGP_ATTR_TYPE_ORIGINATOR_ID = 9
-BGP_ATTR_TYPE_CLUSTER_LIST = 10
-BGP_ATTR_TYPE_MP_REACH_NLRI = 14
-BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
-
-BRIDGE_TYPE_DOCKER = 'docker'
-BRIDGE_TYPE_BRCTL = 'brctl'
-BRIDGE_TYPE_OVS = 'ovs'
-
-
-class CommandError(Exception):
- def __init__(self, out):
- super(CommandError, self).__init__()
- self.out = out
-
-
-def try_several_times(f, t=3, s=1):
- e = RuntimeError()
- for _ in range(t):
- try:
- r = f()
- except RuntimeError as e:
- time.sleep(s)
- else:
- return r
- raise e
-
-
-class CmdBuffer(list):
- def __init__(self, delim='\n'):
- super(CmdBuffer, self).__init__()
- self.delim = delim
-
- def __lshift__(self, value):
- self.append(value)
-
- def __str__(self):
- return self.delim.join(self)
-
-
-class CommandOut(str):
-
- def __new__(cls, stdout, stderr, command, returncode, **kwargs):
- stdout = stdout or ''
- obj = super(CommandOut, cls).__new__(cls, stdout, **kwargs)
- obj.stderr = stderr or ''
- obj.command = command
- obj.returncode = returncode
- return obj
-
-
-class Command(object):
-
- def _execute(self, cmd, capture=False, executable=None):
- """Execute a command using subprocess.Popen()
- :Parameters:
- - out: stdout from subprocess.Popen()
- out has some attributes.
- out.returncode: returncode of subprocess.Popen()
- out.stderr: stderr from subprocess.Popen()
- """
- if capture:
- p_stdout = subprocess.PIPE
- p_stderr = subprocess.PIPE
- else:
- p_stdout = None
- p_stderr = None
- pop = subprocess.Popen(cmd, shell=True, executable=executable,
- stdout=p_stdout,
- stderr=p_stderr)
- __stdout, __stderr = pop.communicate()
- _stdout = six.text_type(__stdout, 'utf-8')
- _stderr = six.text_type(__stderr, 'utf-8')
- out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
- return out
-
- def execute(self, cmd, capture=True, try_times=1, interval=1):
- out = None
- for i in range(try_times):
- out = self._execute(cmd, capture=capture)
- LOG.info(out.command)
- if out.returncode == 0:
- return out
- LOG.error("stdout: %s", out)
- LOG.error("stderr: %s", out.stderr)
- if i + 1 >= try_times:
- break
- time.sleep(interval)
- raise CommandError(out)
-
- def sudo(self, cmd, capture=True, try_times=1, interval=1):
- cmd = 'sudo %s' % cmd
- return self.execute(cmd, capture=capture,
- try_times=try_times, interval=interval)
-
-
-class DockerImage(object):
- def __init__(self, baseimage='ubuntu:16.04'):
- self.baseimage = baseimage
- self.cmd = Command()
-
- def get_images(self):
- out = self.cmd.sudo('sudo docker images')
- images = []
- for line in out.splitlines()[1:]:
- images.append(line.split()[0])
- return images
-
- def exist(self, name):
- return name in self.get_images()
-
- def build(self, tagname, dockerfile_dir):
- self.cmd.sudo(
- "docker build -t {0} {1}".format(tagname, dockerfile_dir),
- try_times=3)
-
- def remove(self, tagname, check_exist=False):
- if check_exist and not self.exist(tagname):
- return tagname
- self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
-
- def create_quagga(self, tagname='quagga', image=None, check_exist=False):
- if check_exist and self.exist(tagname):
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- pkges = ' '.join([
- 'telnet',
- 'tcpdump',
- 'quagga',
- ])
- if image:
- use_image = image
- else:
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'RUN apt-get update'
- c << 'RUN apt-get install -qy --no-install-recommends %s' % pkges
- c << 'CMD /usr/lib/quagga/bgpd'
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
- self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
- self.build(tagname, workdir)
- return tagname
-
- def create_ryu(self, tagname='ryu', image=None, check_exist=False):
- if check_exist and self.exist(tagname):
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- workdir_ctn = '/root/osrg/ryu'
- pkges = ' '.join([
- 'tcpdump',
- 'iproute2',
- ])
- if image:
- use_image = image
- else:
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'ADD ryu %s' % workdir_ctn
- install = ' '.join([
- 'RUN apt-get update',
- '&& apt-get install -qy --no-install-recommends %s' % pkges,
- '&& cd %s' % workdir_ctn,
- # Note: Clean previous builds, because "python setup.py install"
- # might fail if the current directory contains the symlink to
- # Docker host file systems.
- '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
- '&& pip install -r tools/pip-requires -r tools/optional-requires',
- '&& python setup.py install',
- ])
- c << install
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
- self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
- self.cmd.execute('cp -r ../ryu %s/' % workdir)
- self.build(tagname, workdir)
- return tagname
-
-
-class Bridge(object):
- def __init__(self, name, subnet='', start_ip=None, end_ip=None,
- with_ip=True, self_ip=False,
- fixed_ip=None, reuse=False,
- br_type='docker'):
- """Manage a bridge
- :Parameters:
- - name: bridge name
- - subnet: network cider to be used in this bridge
- - start_ip: start address of an ip to be used in the subnet
- - end_ip: end address of an ip to be used in the subnet
- - with_ip: specify if assign automatically an ip address
- - self_ip: specify if assign an ip address for the bridge
- - fixed_ip: an ip address to be assigned to the bridge
- - reuse: specify if use an existing bridge
- - br_type: One either in a 'docker', 'brctl' or 'ovs'
- """
- self.cmd = Command()
- self.name = name
- if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
- BRIDGE_TYPE_OVS):
- raise Exception("argument error br_type: %s" % br_type)
- self.br_type = br_type
- self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
- if TEST_PREFIX != '':
- self.name = '{0}_{1}'.format(TEST_PREFIX, name)
- self.with_ip = with_ip
- if with_ip:
- self.subnet = netaddr.IPNetwork(subnet)
- if start_ip:
- self.start_ip = start_ip
- else:
- self.start_ip = netaddr.IPAddress(self.subnet.first)
- if end_ip:
- self.end_ip = end_ip
- else:
- self.end_ip = netaddr.IPAddress(self.subnet.last)
-
- def _ip_gen():
- for host in netaddr.IPRange(self.start_ip, self.end_ip):
- yield host
- self._ip_generator = _ip_gen()
- # throw away first network address
- self.next_ip_address()
-
- self.self_ip = self_ip
- if fixed_ip:
- self.ip_addr = fixed_ip
- else:
- self.ip_addr = self.next_ip_address()
- if not reuse:
- def f():
- if self.br_type == BRIDGE_TYPE_DOCKER:
- gw = "--gateway %s" % self.ip_addr.split('/')[0]
- v6 = ''
- if self.subnet.version == 6:
- v6 = '--ipv6'
- cmd = ("docker network create --driver bridge %s "
- "%s --subnet %s %s" % (v6, gw, subnet, self.name))
- elif self.br_type == BRIDGE_TYPE_BRCTL:
- cmd = "ip link add {0} type bridge".format(self.name)
- elif self.br_type == BRIDGE_TYPE_OVS:
- cmd = "ovs-vsctl add-br {0}".format(self.name)
- else:
- raise ValueError('Unsupported br_type: %s' % self.br_type)
- self.delete()
- self.execute(cmd, sudo=True, retry=True)
- try_several_times(f)
- if not self.docker_nw:
- self.execute("ip link set up dev {0}".format(self.name),
- sudo=True, retry=True)
-
- if not self.docker_nw and self_ip:
- ips = self.check_br_addr(self.name)
- for key, ip in ips.items():
- if self.subnet.version == key:
- self.execute(
- "ip addr del {0} dev {1}".format(ip, self.name),
- sudo=True, retry=True)
- self.execute(
- "ip addr add {0} dev {1}".format(self.ip_addr, self.name),
- sudo=True, retry=True)
- self.ctns = []
-
- def get_bridges_dc(self):
- out = self.execute('docker network ls', sudo=True, retry=True)
- bridges = []
- for line in out.splitlines()[1:]:
- bridges.append(line.split()[1])
- return bridges
-
- def get_bridges_brctl(self):
- out = self.execute('brctl show', retry=True)
- bridges = []
- for line in out.splitlines()[1:]:
- bridges.append(line.split()[0])
- return bridges
-
- def get_bridges_ovs(self):
- out = self.execute('ovs-vsctl list-br', sudo=True, retry=True)
- return out.splitlines()
-
- def get_bridges(self):
- if self.br_type == BRIDGE_TYPE_DOCKER:
- return self.get_bridges_dc()
- elif self.br_type == BRIDGE_TYPE_BRCTL:
- return self.get_bridges_brctl()
- elif self.br_type == BRIDGE_TYPE_OVS:
- return self.get_bridges_ovs()
-
- def exist(self):
- return self.name in self.get_bridges()
-
- def execute(self, cmd, capture=True, sudo=False, retry=False):
- if sudo:
- m = self.cmd.sudo
- else:
- m = self.cmd.execute
- if retry:
- return m(cmd, capture=capture, try_times=3, interval=1)
- else:
- return m(cmd, capture=capture)
-
- def check_br_addr(self, br):
- ips = {}
- cmd = "ip a show dev %s" % br
- for line in self.execute(cmd, sudo=True).split('\n'):
- if line.strip().startswith("inet "):
- elems = [e.strip() for e in line.strip().split(' ')]
- ips[4] = elems[1]
- elif line.strip().startswith("inet6 "):
- elems = [e.strip() for e in line.strip().split(' ')]
- ips[6] = elems[1]
- return ips
-
- def next_ip_address(self):
- return "{0}/{1}".format(next(self._ip_generator),
- self.subnet.prefixlen)
-
- def addif(self, ctn):
- name = ctn.next_if_name()
- self.ctns.append(ctn)
- ip_address = None
- if self.docker_nw:
- ipv4 = None
- ipv6 = None
- ip_address = self.next_ip_address()
- ip_address_ip = ip_address.split('/')[0]
- version = 4
- if netaddr.IPNetwork(ip_address).version == 6:
- version = 6
- opt_ip = "--ip %s" % ip_address_ip
- if version == 4:
- ipv4 = ip_address
- else:
- opt_ip = "--ip6 %s" % ip_address_ip
- ipv6 = ip_address
- cmd = "docker network connect %s %s %s" % (
- opt_ip, self.name, ctn.docker_name())
- self.execute(cmd, sudo=True)
- ctn.set_addr_info(bridge=self.name, ipv4=ipv4, ipv6=ipv6,
- ifname=name)
- else:
- if self.with_ip:
- ip_address = self.next_ip_address()
- version = 4
- if netaddr.IPNetwork(ip_address).version == 6:
- version = 6
- ctn.pipework(self, ip_address, name, version=version)
- else:
- ctn.pipework(self, '0/0', name)
- return ip_address
-
- def delete(self, check_exist=True):
- if check_exist:
- if not self.exist():
- return
- if self.br_type == BRIDGE_TYPE_DOCKER:
- self.execute("docker network rm %s" % self.name,
- sudo=True, retry=True)
- elif self.br_type == BRIDGE_TYPE_BRCTL:
- self.execute("ip link set down dev %s" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ip link delete %s type bridge" % self.name,
- sudo=True, retry=True)
- elif self.br_type == BRIDGE_TYPE_OVS:
- self.execute(
- "ovs-vsctl del-br %s" % self.name,
- sudo=True, retry=True)
-
-
-class Container(object):
- def __init__(self, name, image=None):
- self.name = name
- self.image = image
- self.shared_volumes = []
- self.ip_addrs = []
- self.ip6_addrs = []
- self.is_running = False
- self.eths = []
- self.id = None
-
- self.cmd = Command()
- self.remove()
-
- def docker_name(self):
- if TEST_PREFIX == DEFAULT_TEST_PREFIX:
- return self.name
- return '{0}_{1}'.format(TEST_PREFIX, self.name)
-
- def get_docker_id(self):
- if self.id:
- return self.id
- else:
- return self.docker_name()
-
- def next_if_name(self):
- name = 'eth{0}'.format(len(self.eths) + 1)
- self.eths.append(name)
- return name
-
- def set_addr_info(self, bridge, ipv4=None, ipv6=None, ifname='eth0'):
- if ipv4:
- self.ip_addrs.append((ifname, ipv4, bridge))
- if ipv6:
- self.ip6_addrs.append((ifname, ipv6, bridge))
-
- def get_addr_info(self, bridge, ipv=4):
- addrinfo = {}
- if ipv == 4:
- ip_addrs = self.ip_addrs
- elif ipv == 6:
- ip_addrs = self.ip6_addrs
- else:
- return None
- for addr in ip_addrs:
- if addr[2] == bridge:
- addrinfo[addr[1]] = addr[0]
- return addrinfo
-
- def execute(self, cmd, capture=True, sudo=False, retry=False):
- if sudo:
- m = self.cmd.sudo
- else:
- m = self.cmd.execute
- if retry:
- return m(cmd, capture=capture, try_times=3, interval=1)
- else:
- return m(cmd, capture=capture)
-
- def dcexec(self, cmd, capture=True, retry=False):
- if retry:
- return self.cmd.sudo(cmd, capture=capture, try_times=3, interval=1)
- else:
- return self.cmd.sudo(cmd, capture=capture)
-
- def exec_on_ctn(self, cmd, capture=True, detach=False):
- name = self.docker_name()
- flag = '-d' if detach else ''
- return self.dcexec('docker exec {0} {1} {2}'.format(
- flag, name, cmd), capture=capture)
-
- def get_containers(self, allctn=False):
- cmd = 'docker ps --no-trunc=true'
- if allctn:
- cmd += ' --all=true'
- out = self.dcexec(cmd, retry=True)
- containers = []
- for line in out.splitlines()[1:]:
- containers.append(line.split()[-1])
- return containers
-
- def exist(self, allctn=False):
- return self.docker_name() in self.get_containers(allctn=allctn)
-
- def run(self):
- c = CmdBuffer(' ')
- c << "docker run --privileged=true"
- for sv in self.shared_volumes:
- c << "-v {0}:{1}".format(sv[0], sv[1])
- c << "--name {0} --hostname {0} -id {1}".format(self.docker_name(),
- self.image)
- self.id = self.dcexec(str(c), retry=True)
- self.is_running = True
- self.exec_on_ctn("ip li set up dev lo")
- ipv4 = None
- ipv6 = None
- for line in self.exec_on_ctn("ip a show dev eth0").split('\n'):
- if line.strip().startswith("inet "):
- elems = [e.strip() for e in line.strip().split(' ')]
- ipv4 = elems[1]
- elif line.strip().startswith("inet6 "):
- elems = [e.strip() for e in line.strip().split(' ')]
- ipv6 = elems[1]
- self.set_addr_info(bridge='docker0', ipv4=ipv4, ipv6=ipv6,
- ifname='eth0')
- return 0
-
- def stop(self, check_exist=True):
- if check_exist:
- if not self.exist(allctn=False):
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- def remove(self, check_exist=True):
- if check_exist:
- if not self.exist(allctn=True):
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- def pipework(self, bridge, ip_addr, intf_name="", version=4):
- if not self.is_running:
- LOG.warning('Call run() before pipeworking')
- return
- c = CmdBuffer(' ')
- c << "pipework {0}".format(bridge.name)
-
- if intf_name != "":
- c << "-i {0}".format(intf_name)
- else:
- intf_name = "eth1"
- ipv4 = None
- ipv6 = None
- if version == 4:
- ipv4 = ip_addr
- else:
- c << '-a 6'
- ipv6 = ip_addr
- c << "{0} {1}".format(self.docker_name(), ip_addr)
- self.set_addr_info(bridge=bridge.name, ipv4=ipv4, ipv6=ipv6,
- ifname=intf_name)
- self.execute(str(c), sudo=True, retry=True)
-
- def get_pid(self):
- if self.is_running:
- cmd = "docker inspect -f '{{.State.Pid}}' %s" % self.docker_name()
- return int(self.dcexec(cmd))
- return -1
-
- def start_tcpdump(self, interface=None, filename=None):
- if not interface:
- interface = "eth0"
- if not filename:
- filename = "{0}/{1}.dump".format(
- self.shared_volumes[0][1], interface)
- self.exec_on_ctn(
- "tcpdump -i {0} -w {1}".format(interface, filename),
- detach=True)
-
-
-class BGPContainer(Container):
-
- WAIT_FOR_BOOT = 1
- RETRY_INTERVAL = 5
- DEFAULT_PEER_ARG = {'neigh_addr': '',
- 'passwd': None,
- 'vpn': False,
- 'flowspec': False,
- 'is_rs_client': False,
- 'is_rr_client': False,
- 'cluster_id': None,
- 'policies': None,
- 'passive': False,
- 'local_addr': '',
- 'as2': False,
- 'graceful_restart': None,
- 'local_as': None,
- 'prefix_limit': None}
- default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
- DEFAULT_ROUTE_ARG = {'prefix': None,
- 'rf': 'ipv4',
- 'attr': None,
- 'next-hop': None,
- 'as-path': None,
- 'community': None,
- 'med': None,
- 'local-pref': None,
- 'extended-community': None,
- 'matchs': None,
- 'thens': None}
- default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
-
- def __init__(self, name, asn, router_id, ctn_image_name=None):
- self.config_dir = TEST_BASE_DIR
- if TEST_PREFIX:
- self.config_dir = os.path.join(self.config_dir, TEST_PREFIX)
- self.config_dir = os.path.join(self.config_dir, name)
- self.asn = asn
- self.router_id = router_id
- self.peers = {}
- self.routes = {}
- self.policies = {}
- super(BGPContainer, self).__init__(name, ctn_image_name)
- self.execute(
- 'rm -rf {0}'.format(self.config_dir), sudo=True)
- self.execute('mkdir -p {0}'.format(self.config_dir))
- self.execute('chmod 777 {0}'.format(self.config_dir))
-
- def __repr__(self):
- return str({'name': self.name, 'asn': self.asn,
- 'router_id': self.router_id})
-
- def run(self, wait=False, w_time=WAIT_FOR_BOOT):
- self.create_config()
- super(BGPContainer, self).run()
- if wait:
- time.sleep(w_time)
- return w_time
-
- def add_peer(self, peer, bridge='', reload_config=True, v6=False,
- peer_info=None):
- peer_info = peer_info or {}
- self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
- self.peers[peer].update(peer_info)
- peer_keys = sorted(self.peers[peer].keys())
- if peer_keys != self.default_peer_keys:
- raise Exception("argument error peer_info: %s" % peer_info)
-
- neigh_addr = ''
- local_addr = ''
- it = itertools.product(self.ip_addrs, peer.ip_addrs)
- if v6:
- it = itertools.product(self.ip6_addrs, peer.ip6_addrs)
-
- for me, you in it:
- if bridge != '' and bridge != me[2]:
- continue
- if me[2] == you[2]:
- neigh_addr = you[1]
- local_addr = me[1]
- if v6:
- addr, mask = local_addr.split('/')
- local_addr = "{0}%{1}/{2}".format(addr, me[0], mask)
- break
-
- if neigh_addr == '':
- raise Exception('peer {0} seems not ip reachable'.format(peer))
-
- if not self.peers[peer]['policies']:
- self.peers[peer]['policies'] = {}
-
- self.peers[peer]['neigh_addr'] = neigh_addr
- self.peers[peer]['local_addr'] = local_addr
- if self.is_running and reload_config:
- self.create_config()
- self.reload_config()
-
- def del_peer(self, peer, reload_config=True):
- del self.peers[peer]
- if self.is_running and reload_config:
- self.create_config()
- self.reload_config()
-
- def disable_peer(self, peer):
- raise NotImplementedError()
-
- def enable_peer(self, peer):
- raise NotImplementedError()
-
- def log(self):
- return self.execute('cat {0}/*.log'.format(self.config_dir))
-
- def add_route(self, route, reload_config=True, route_info=None):
- route_info = route_info or {}
- self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
- self.routes[route].update(route_info)
- route_keys = sorted(self.routes[route].keys())
- if route_keys != self.default_route_keys:
- raise Exception("argument error route_info: %s" % route_info)
- self.routes[route]['prefix'] = route
- if self.is_running and reload_config:
- self.create_config()
- self.reload_config()
-
- def add_policy(self, policy, peer, typ, default='accept',
- reload_config=True):
- self.set_default_policy(peer, typ, default)
- self.define_policy(policy)
- self.assign_policy(peer, policy, typ)
- if self.is_running and reload_config:
- self.create_config()
- self.reload_config()
-
- def set_default_policy(self, peer, typ, default):
- if (typ in ['in', 'out', 'import', 'export'] and
- default in ['reject', 'accept']):
- if 'default-policy' not in self.peers[peer]:
- self.peers[peer]['default-policy'] = {}
- self.peers[peer]['default-policy'][typ] = default
- else:
- raise Exception('wrong type or default')
-
- def define_policy(self, policy):
- self.policies[policy['name']] = policy
-
- def assign_policy(self, peer, policy, typ):
- if peer not in self.peers:
- raise Exception('peer {0} not found'.format(peer.name))
- name = policy['name']
- if name not in self.policies:
- raise Exception('policy {0} not found'.format(name))
- self.peers[peer]['policies'][typ] = policy
-
- def get_local_rib(self, peer, rf):
- raise NotImplementedError()
-
- def get_global_rib(self, rf):
- raise NotImplementedError()
-
- def get_neighbor_state(self, peer_id):
- raise NotImplementedError()
-
- def get_reachablily(self, prefix, timeout=20):
- version = netaddr.IPNetwork(prefix).version
- addr = prefix.split('/')[0]
- if version == 4:
- ping_cmd = 'ping'
- elif version == 6:
- ping_cmd = 'ping6'
- else:
- raise Exception(
- 'unsupported route family: {0}'.format(version))
- cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format(
- ping_cmd, addr)
- interval = 1
- count = 0
- while True:
- res = self.exec_on_ctn(cmd)
- LOG.info(res)
- if '1 packets received' in res and '0% packet loss':
- break
- time.sleep(interval)
- count += interval
- if count >= timeout:
- raise Exception('timeout')
- return True
-
- def wait_for(self, expected_state, peer, timeout=120):
- interval = 1
- count = 0
- while True:
- state = self.get_neighbor_state(peer)
- LOG.info("%s's peer %s state: %s",
- self.router_id, peer.router_id, state)
- if state == expected_state:
- return
-
- time.sleep(interval)
- count += interval
- if count >= timeout:
- raise Exception('timeout')
-
- def add_static_route(self, network, next_hop):
- cmd = '/sbin/ip route add {0} via {1}'.format(network, next_hop)
- self.exec_on_ctn(cmd)
-
- def set_ipv6_forward(self):
- cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
- self.exec_on_ctn(cmd)
-
- def create_config(self):
- raise NotImplementedError()
-
- def reload_config(self):
- raise NotImplementedError()
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg.sh b/ryu/tests/integrated/common/install_docker_test_pkg.sh
deleted file mode 100644
index a771dfc..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-function add_docker_aptline {
- sudo apt-get update
- if ! apt-cache search docker-engine | grep docker-engine; then
- VER=`lsb_release -r`
- if echo $VER | grep 12.04; then
- REL_NAME=precise
- elif echo $VER | grep 14.04; then
- REL_NAME=trusty
- elif echo $VER | grep 15.10; then
- REL_NAME=wily
- elif echo $VER | grep 16.04; then
- REL_NAME=xenial
- else
- retrun 1
- fi
- RELEASE=ubuntu-$REL_NAME
- sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
- sudo sh -c "echo deb https://apt.dockerproject.org/repo $RELEASE main > /etc/apt/sources.list.d/docker.list"
- fi
-}
-
-init_variables
-process_options "$@"
-
-if [ $APTLINE_DOCKER -eq 1 ]; then
- add_docker_aptline
-fi
-
-sudo apt-get update
-if apt-cache search docker-engine | grep docker-engine; then
- DOCKER_PKG=docker-engine
-else
- DOCKER_PKG=docker.io
-fi
-sudo apt-get install -y $DOCKER_PKG
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh b/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
deleted file mode 100644
index 44a3e10..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-set -ex
-
-function init_variables {
- APTLINE_DOCKER=0
- DIR_BASE=/tmp
-}
-
-function process_options {
- local max
- local i
- max=$#
- i=1
- while [ $i -le $max ]; do
- case "$1" in
- -a|--add-docker-aptline)
- APTLINE_DOCKER=1
- ;;
- -d|--download-dir)
- shift; ((i++))
- DIR_BASE=$1
- ;;
- esac
- shift; ((i++))
- done
-}
-
-function install_pipework {
- if ! which /usr/local/bin/pipework >/dev/null
- then
- sudo rm -rf $DIR_BASE/pipework
- git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework
- sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework
- fi
-}
-
-function install_depends_pkg {
- install_pipework
-}
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh b/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
deleted file mode 100644
index d8c3b49..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-init_variables
-process_options "$@"
-
-sudo apt-get update
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/quagga.py b/ryu/tests/integrated/common/quagga.py
deleted file mode 100644
index 9b6d218..0000000
--- a/ryu/tests/integrated/common/quagga.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/quagga.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-
-import netaddr
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-class QuaggaBGPContainer(base.BGPContainer):
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/quagga'
-
- def __init__(self, name, asn, router_id, ctn_image_name, zebra=False):
- super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
- self.zebra = zebra
- self._create_config_debian()
-
- def run(self, wait=False, w_time=WAIT_FOR_BOOT):
- w_time = super(QuaggaBGPContainer,
- self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- def get_global_rib(self, prefix='', rf='ipv4'):
- rib = []
- if prefix != '':
- return self.get_global_rib_with_prefix(prefix, rf)
-
- out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
- if out.startswith('No BGP network exists'):
- return rib
-
- read_next = False
-
- for line in out.split('\n'):
- ibgp = False
- if line[:2] == '*>':
- line = line[2:]
- if line[0] == 'i':
- line = line[1:]
- ibgp = True
- elif not read_next:
- continue
-
- elems = line.split()
-
- if len(elems) == 1:
- read_next = True
- prefix = elems[0]
- continue
- elif read_next:
- nexthop = elems[0]
- else:
- prefix = elems[0]
- nexthop = elems[1]
- read_next = False
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'ibgp': ibgp})
-
- return rib
-
- def get_global_rib_with_prefix(self, prefix, rf):
- rib = []
-
- lines = [line.strip() for line in self.vtysh(
- 'show bgp {0} unicast {1}'.format(rf, prefix),
- config=False).split('\n')]
-
- if lines[0] == '% Network not in table':
- return rib
-
- lines = lines[2:]
-
- if lines[0].startswith('Not advertised'):
- lines.pop(0) # another useless line
- elif lines[0].startswith('Advertised to non peer-group peers:'):
- lines = lines[2:] # other useless lines
- else:
- raise Exception('unknown output format {0}'.format(lines))
-
- if lines[0] == 'Local':
- aspath = []
- else:
- aspath = [int(asn) for asn in lines[0].split()]
-
- nexthop = lines[1].split()[0].strip()
- info = [s.strip(',') for s in lines[2].split()]
- attrs = []
- if 'metric' in info:
- med = info[info.index('metric') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
- 'metric': int(med)})
- if 'localpref' in info:
- localpref = info[info.index('localpref') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
- 'value': int(localpref)})
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'aspath': aspath, 'attrs': attrs})
-
- return rib
-
- def get_neighbor_state(self, peer):
- if peer not in self.peers:
- raise Exception('not found peer {0}'.format(peer.router_id))
-
- neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
-
- info = [l.strip() for l in self.vtysh(
- 'show bgp neighbors {0}'.format(neigh_addr),
- config=False).split('\n')]
-
- if not info[0].startswith('BGP neighbor is'):
- raise Exception('unknown format')
-
- idx1 = info[0].index('BGP neighbor is ')
- idx2 = info[0].index(',')
- n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
- if n_addr == neigh_addr:
- idx1 = info[2].index('= ')
- state = info[2][idx1 + len('= '):]
- if state.startswith('Idle'):
- return base.BGP_FSM_IDLE
- elif state.startswith('Active'):
- return base.BGP_FSM_ACTIVE
- elif state.startswith('Established'):
- return base.BGP_FSM_ESTABLISHED
- else:
- return state
-
- raise Exception('not found peer {0}'.format(peer.router_id))
-
- def send_route_refresh(self):
- self.vtysh('clear ip bgp * soft', config=False)
-
- def create_config(self):
- zebra = 'no'
- self._create_config_bgp()
- if self.zebra:
- zebra = 'yes'
- self._create_config_zebra()
- self._create_config_daemons(zebra)
-
- def _create_config_debian(self):
- c = base.CmdBuffer()
- c << 'vtysh_enable=yes'
- c << 'zebra_options=" --daemon -A 127.0.0.1"'
- c << 'bgpd_options=" --daemon -A 127.0.0.1"'
- c << 'ospfd_options=" --daemon -A 127.0.0.1"'
- c << 'ospf6d_options=" --daemon -A ::1"'
- c << 'ripd_options=" --daemon -A 127.0.0.1"'
- c << 'ripngd_options=" --daemon -A ::1"'
- c << 'isisd_options=" --daemon -A 127.0.0.1"'
- c << 'babeld_options=" --daemon -A 127.0.0.1"'
- c << 'watchquagga_enable=yes'
- c << 'watchquagga_options=(--daemon)'
- with open('{0}/debian.conf'.format(self.config_dir), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- def _create_config_daemons(self, zebra='no'):
- c = base.CmdBuffer()
- c << 'zebra=%s' % zebra
- c << 'bgpd=yes'
- c << 'ospfd=no'
- c << 'ospf6d=no'
- c << 'ripd=no'
- c << 'ripngd=no'
- c << 'isisd=no'
- c << 'babeld=no'
- with open('{0}/daemons'.format(self.config_dir), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- def _create_config_bgp(self):
-
- c = base.CmdBuffer()
- c << 'hostname bgpd'
- c << 'password zebra'
- c << 'router bgp {0}'.format(self.asn)
- c << 'bgp router-id {0}'.format(self.router_id)
- if any(info['graceful_restart'] for info in self.peers.values()):
- c << 'bgp graceful-restart'
-
- version = 4
- for peer, info in self.peers.items():
- version = netaddr.IPNetwork(info['neigh_addr']).version
- n_addr = info['neigh_addr'].split('/')[0]
- if version == 6:
- c << 'no bgp default ipv4-unicast'
-
- c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
- if info['is_rs_client']:
- c << 'neighbor {0} route-server-client'.format(n_addr)
- for typ, p in info['policies'].items():
- c << 'neighbor {0} route-map {1} {2}'.format(n_addr, p['name'],
- typ)
- if info['passwd']:
- c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
- if info['passive']:
- c << 'neighbor {0} passive'.format(n_addr)
- if version == 6:
- c << 'address-family ipv6 unicast'
- c << 'neighbor {0} activate'.format(n_addr)
- c << 'exit-address-family'
-
- for route in self.routes.values():
- if route['rf'] == 'ipv4':
- c << 'network {0}'.format(route['prefix'])
- elif route['rf'] == 'ipv6':
- c << 'address-family ipv6 unicast'
- c << 'network {0}'.format(route['prefix'])
- c << 'exit-address-family'
- else:
- raise Exception(
- 'unsupported route faily: {0}'.format(route['rf']))
-
- if self.zebra:
- if version == 6:
- c << 'address-family ipv6 unicast'
- c << 'redistribute connected'
- c << 'exit-address-family'
- else:
- c << 'redistribute connected'
-
- for name, policy in self.policies.items():
- c << 'access-list {0} {1} {2}'.format(name, policy['type'],
- policy['match'])
- c << 'route-map {0} permit 10'.format(name)
- c << 'match ip address {0}'.format(name)
- c << 'set metric {0}'.format(policy['med'])
-
- c << 'debug bgp as4'
- c << 'debug bgp fsm'
- c << 'debug bgp updates'
- c << 'debug bgp events'
- c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
-
- with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- def _create_config_zebra(self):
- c = base.CmdBuffer()
- c << 'hostname zebra'
- c << 'password zebra'
- c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
- c << 'debug zebra packet'
- c << 'debug zebra kernel'
- c << 'debug zebra rib'
- c << ''
-
- with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- def vtysh(self, cmd, config=True):
- if not isinstance(cmd, list):
- cmd = [cmd]
- cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
- if config:
- return self.exec_on_ctn(
- "vtysh -d bgpd -c 'en' -c 'conf t' -c "
- "'router bgp {0}' {1}".format(self.asn, cmd),
- capture=True)
- else:
- return self.exec_on_ctn("vtysh -d bgpd {0}".format(cmd),
- capture=True)
-
- def reload_config(self):
- daemon = []
- daemon.append('bgpd')
- if self.zebra:
- daemon.append('zebra')
- for d in daemon:
- cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
- self.exec_on_ctn(cmd, capture=True)
-
-
-class RawQuaggaBGPContainer(QuaggaBGPContainer):
- def __init__(self, name, config, ctn_image_name,
- zebra=False):
- asn = None
- router_id = None
- for line in config.split('\n'):
- line = line.strip()
- if line.startswith('router bgp'):
- asn = int(line[len('router bgp'):].strip())
- if line.startswith('bgp router-id'):
- router_id = line[len('bgp router-id'):].strip()
- if not asn:
- raise Exception('asn not in quagga config')
- if not router_id:
- raise Exception('router-id not in quagga config')
- self.config = config
- super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name, zebra)
-
- def create_config(self):
- with open(os.path.join(self.config_dir, 'bgpd.conf'), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(self.config)
- f.writelines(self.config)
diff --git a/ryu/tests/integrated/common/ryubgp.py b/ryu/tests/integrated/common/ryubgp.py
deleted file mode 100644
index 8fe16f4..0000000
--- a/ryu/tests/integrated/common/ryubgp.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-import time
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-class RyuBGPContainer(base.BGPContainer):
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/ryu'
-
- def __init__(self, name, asn, router_id, ctn_image_name):
- super(RyuBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
- self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
- self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
- self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
-
- def _create_config_ryu(self):
- c = base.CmdBuffer()
- c << '[DEFAULT]'
- c << 'verbose=True'
- c << 'log_file=/etc/ryu/manager.log'
- with open(self.RYU_CONF, 'w') as f:
- LOG.info("[%s's new config]" % self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- def _create_config_ryu_bgp(self):
- c = base.CmdBuffer()
- c << 'import os'
- c << ''
- c << 'BGP = {'
- c << " 'local_as': %s," % str(self.asn)
- c << " 'router_id': '%s'," % self.router_id
- c << " 'neighbors': ["
- c << " {"
- for peer, info in self.peers.items():
- n_addr = info['neigh_addr'].split('/')[0]
- c << " 'address': '%s'," % n_addr
- c << " 'remote_as': %s," % str(peer.asn)
- c << " 'enable_ipv4': True,"
- c << " 'enable_ipv6': True,"
- c << " 'enable_vpnv4': True,"
- c << " 'enable_vpnv6': True,"
- c << ' },'
- c << ' ],'
- c << " 'routes': ["
- for route in self.routes.values():
- c << " {"
- c << " 'prefix': '%s'," % route['prefix']
- c << " },"
- c << " ],"
- c << "}"
- log_conf = """LOGGING = {
-
- # We use python logging package for logging.
- 'version': 1,
- 'disable_existing_loggers': False,
-
- 'formatters': {
- 'verbose': {
- 'format': '%(levelname)s %(asctime)s %(module)s ' +
- '[%(process)d %(thread)d] %(message)s'
- },
- 'simple': {
- 'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
- '%(message)s'
- },
- 'stats': {
- 'format': '%(message)s'
- },
- },
-
- 'handlers': {
- # Outputs log to console.
- 'console': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'simple'
- },
- 'console_stats': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'stats'
- },
- # Rotates log file when its size reaches 10MB.
- 'log_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'bgpspeaker.log'),
- 'maxBytes': '10000000',
- 'formatter': 'verbose'
- },
- 'stats_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'statistics_bgps.log'),
- 'maxBytes': '10000000',
- 'formatter': 'stats'
- },
- },
-
- # Fine-grained control of logging per instance.
- 'loggers': {
- 'bgpspeaker': {
- 'handlers': ['console', 'log_file'],
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'stats': {
- 'handlers': ['stats_file', 'console_stats'],
- 'level': 'INFO',
- 'propagate': False,
- 'formatter': 'stats',
- },
- },
-
- # Root loggers.
- 'root': {
- 'handlers': ['console', 'log_file'],
- 'level': 'DEBUG',
- 'propagate': True,
- },
-}"""
- c << log_conf
- with open(os.path.join(self.config_dir, 'bgp_conf.py'), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- def create_config(self):
- self._create_config_ryu()
- self._create_config_ryu_bgp()
-
- def is_running_ryu(self):
- results = self.exec_on_ctn('ps ax')
- running = False
- for line in results.split('\n')[1:]:
- if 'ryu-manager' in line:
- running = True
- return running
-
- def start_ryubgp(self, check_running=True, retry=False):
- if check_running:
- if self.is_running_ryu():
- return True
- result = False
- if retry:
- try_times = 3
- else:
- try_times = 1
- cmd = "ryu-manager --verbose "
- cmd += "--config-file %s " % self.SHARED_RYU_CONF
- cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
- cmd += "ryu.services.protocols.bgp.application"
- for _ in range(try_times):
- self.exec_on_ctn(cmd, detach=True)
- if self.is_running_ryu():
- result = True
- break
- time.sleep(1)
- return result
-
- def stop_ryubgp(self, check_running=True, retry=False):
- if check_running:
- if not self.is_running_ryu():
- return True
- result = False
- if retry:
- try_times = 3
- else:
- try_times = 1
- for _ in range(try_times):
- cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
- self.exec_on_ctn(cmd)
- if not self.is_running_ryu():
- result = True
- break
- time.sleep(1)
- return result
-
- def run(self, wait=False, w_time=WAIT_FOR_BOOT):
- w_time = super(RyuBGPContainer,
- self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- def reload_config(self):
- self.stop_ryubgp(retry=True)
- self.start_ryubgp(retry=True)
diff --git a/tests/integrated/bgp/base.py b/tests/integrated/bgp/base.py
index 26fa396..2f210de 100644
--- a/tests/integrated/bgp/base.py
+++ b/tests/integrated/bgp/base.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest

-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga


LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/base_ip6.py b/tests/integrated/bgp/base_ip6.py
index be26faf..d867920 100644
--- a/tests/integrated/bgp/base_ip6.py
+++ b/tests/integrated/bgp/base_ip6.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest

-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga


LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/test_basic.py b/tests/integrated/bgp/test_basic.py
index 5817d44..d1eda39 100644
--- a/tests/integrated/bgp/test_basic.py
+++ b/tests/integrated/bgp/test_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import

import time

-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base

from . import base

diff --git a/tests/integrated/bgp/test_ip6_basic.py b/tests/integrated/bgp/test_ip6_basic.py
index 40461a5..911a0b5 100644
--- a/tests/integrated/bgp/test_ip6_basic.py
+++ b/tests/integrated/bgp/test_ip6_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import

import time

-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base

from . import base_ip6 as base
--
2.7.4
Takashi YAMAMOTO
2017-07-12 14:43:25 UTC
Permalink
this breaks openstack neutron_dynamic_routing, doesn't it?
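The breakage would come from the import path: neutron_dynamic_routing's integration tests presumably still import these helpers from ryu.tests.integrated.common, which this patch deletes. A minimal sketch of a compatibility shim a downstream consumer could use while both layouts are in the wild (illustrative only, not part of this patch):

    # Illustrative compatibility shim for a downstream test suite; it prefers
    # the new location added by this patch and falls back to the path the
    # patch removes, so the same code runs against either Ryu tree.
    try:
        from ryu.lib.docker import docker_base as ctn_base   # new location (this patch)
        from ryu.lib.docker import quagga, ryubgp
    except ImportError:
        from ryu.tests.integrated.common import docker_base as ctn_base  # pre-move location
        from ryu.tests.integrated.common import quagga, ryubgp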
Post by fumihiko kakuma
This patch requires the v2 patch of Iwase-San.
+ 'formatter': 'verbose'
+ },
+ 'stats_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'statistics_bgps.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'stats'
+ },
+ },
+
+ # Fine-grained control of logging per instance.
+ 'loggers': {
+ 'bgpspeaker': {
+ 'handlers': ['console', 'log_file'],
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'stats': {
+ 'handlers': ['stats_file', 'console_stats'],
+ 'level': 'INFO',
+ 'propagate': False,
+ 'formatter': 'stats',
+ },
+ },
+
+ # Root loggers.
+ 'root': {
+ 'handlers': ['console', 'log_file'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ },
+}"""
+ c << log_conf
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ self._create_config_ryu()
+ self._create_config_ryu_bgp()
+
+ results = self.exec_on_ctn('ps ax')
+ running = False
+ running = True
+ return running
+
+ return True
+ result = False
+ try_times = 3
+ try_times = 1
+ cmd = "ryu-manager --verbose "
+ cmd += "--config-file %s " % self.SHARED_RYU_CONF
+ cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
+ cmd += "ryu.services.protocols.bgp.application"
+ self.exec_on_ctn(cmd, detach=True)
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ return True
+ result = False
+ try_times = 3
+ try_times = 1
+ cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
+ self.exec_on_ctn(cmd)
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ w_time = super(RyuBGPContainer,
+ self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
+ return w_time
+
+ self.stop_ryubgp(retry=True)
+ self.start_ryubgp(retry=True)
diff --git a/ryu/tests/integrated/common/__init__.py b/ryu/tests/integrated/common/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/ryu/tests/integrated/common/docker_base.py b/ryu/tests/integrated/common/docker_base.py
deleted file mode 100644
index 1ae2cc2..0000000
--- a/ryu/tests/integrated/common/docker_base.py
+++ /dev/null
@@ -1,801 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/base.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import itertools
-import logging
-import os
-import subprocess
-import time
-
-import netaddr
-import six
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_TEST_PREFIX = ''
-DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
-TEST_PREFIX = DEFAULT_TEST_PREFIX
-TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
-
-BGP_FSM_IDLE = 'BGP_FSM_IDLE'
-BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
-BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
-
-BGP_ATTR_TYPE_ORIGIN = 1
-BGP_ATTR_TYPE_AS_PATH = 2
-BGP_ATTR_TYPE_NEXT_HOP = 3
-BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
-BGP_ATTR_TYPE_LOCAL_PREF = 5
-BGP_ATTR_TYPE_COMMUNITIES = 8
-BGP_ATTR_TYPE_ORIGINATOR_ID = 9
-BGP_ATTR_TYPE_CLUSTER_LIST = 10
-BGP_ATTR_TYPE_MP_REACH_NLRI = 14
-BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
-
-BRIDGE_TYPE_DOCKER = 'docker'
-BRIDGE_TYPE_BRCTL = 'brctl'
-BRIDGE_TYPE_OVS = 'ovs'
-
-
- super(CommandError, self).__init__()
- self.out = out
-
-
- e = RuntimeError()
- r = f()
- time.sleep(s)
- return r
- raise e
-
-
- super(CmdBuffer, self).__init__()
- self.delim = delim
-
- self.append(value)
-
- return self.delim.join(self)
-
-
-
- stdout = stdout or ''
- obj = super(CommandOut, cls).__new__(cls, stdout, **kwargs)
- obj.stderr = stderr or ''
- obj.command = command
- obj.returncode = returncode
- return obj
-
-
-
- """Execute a command using subprocess.Popen()
- - out: stdout from subprocess.Popen()
- out has some attributes.
- out.returncode: returncode of subprocess.Popen()
- out.stderr: stderr from subprocess.Popen()
- """
- p_stdout = subprocess.PIPE
- p_stderr = subprocess.PIPE
- p_stdout = None
- p_stderr = None
- pop = subprocess.Popen(cmd, shell=True, executable=executable,
- stdout=p_stdout,
- stderr=p_stderr)
- __stdout, __stderr = pop.communicate()
- _stdout = six.text_type(__stdout, 'utf-8')
- _stderr = six.text_type(__stderr, 'utf-8')
- out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
- return out
-
- out = None
- out = self._execute(cmd, capture=capture)
- LOG.info(out.command)
- return out
- LOG.error("stdout: %s", out)
- LOG.error("stderr: %s", out.stderr)
- break
- time.sleep(interval)
- raise CommandError(out)
-
- cmd = 'sudo %s' % cmd
- return self.execute(cmd, capture=capture,
- try_times=try_times, interval=interval)
-
-
- self.baseimage = baseimage
- self.cmd = Command()
-
- out = self.cmd.sudo('sudo docker images')
- images = []
- images.append(line.split()[0])
- return images
-
- return name in self.get_images()
-
- self.cmd.sudo(
- "docker build -t {0} {1}".format(tagname, dockerfile_dir),
- try_times=3)
-
- return tagname
- self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
-
- def create_quagga(self, tagname='quagga', image=None,
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- pkges = ' '.join([
- 'telnet',
- 'tcpdump',
- 'quagga',
- ])
- use_image = image
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'RUN apt-get update'
- c << 'RUN apt-get install -qy --no-install-recommends %s' % pkges
- c << 'CMD /usr/lib/quagga/bgpd'
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
- self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
- self.build(tagname, workdir)
- return tagname
-
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- workdir_ctn = '/root/osrg/ryu'
- pkges = ' '.join([
- 'tcpdump',
- 'iproute2',
- ])
- use_image = image
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'ADD ryu %s' % workdir_ctn
- install = ' '.join([
- 'RUN apt-get update',
- '&& apt-get install -qy --no-install-recommends %s' % pkges,
- '&& cd %s' % workdir_ctn,
- # Note: Clean previous builds, because "python setup.py install"
- # might fail if the current directory contains the symlink to
- # Docker host file systems.
- '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
- '&& pip install -r tools/pip-requires -r tools/optional-requires',
- '&& python setup.py install',
- ])
- c << install
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
- self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
- self.cmd.execute('cp -r ../ryu %s/' % workdir)
- self.build(tagname, workdir)
- return tagname
-
-
- def __init__(self, name, subnet='', start_ip=None, end_ip=None,
- with_ip=True, self_ip=False,
- fixed_ip=None, reuse=False,
- """Manage a bridge
- - name: bridge name
- - subnet: network CIDR to be used in this bridge
- - start_ip: start address of an ip to be used in the subnet
- - end_ip: end address of an ip to be used in the subnet
- - with_ip: specify if assign automatically an ip address
- - self_ip: specify if assign an ip address for the bridge
- - fixed_ip: an ip address to be assigned to the bridge
- - reuse: specify if use an existing bridge
- - br_type: One either in a 'docker', 'brctl' or 'ovs'
- """
- self.cmd = Command()
- self.name = name
- if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
- raise Exception("argument error br_type: %s" % br_type)
- self.br_type = br_type
- self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
- self.name = '{0}_{1}'.format(TEST_PREFIX, name)
- self.with_ip = with_ip
- self.subnet = netaddr.IPNetwork(subnet)
- self.start_ip = start_ip
- self.start_ip = netaddr.IPAddress(self.subnet.first)
- self.end_ip = end_ip
- self.end_ip = netaddr.IPAddress(self.subnet.last)
-
- yield host
- self._ip_generator = _ip_gen()
- # throw away first network address
- self.next_ip_address()
-
- self.self_ip = self_ip
- self.ip_addr = fixed_ip
- self.ip_addr = self.next_ip_address()
- gw = "--gateway %s" % self.ip_addr.split('/')[0]
- v6 = ''
- v6 = '--ipv6'
- cmd = ("docker network create --driver bridge %s "
- "%s --subnet %s %s" % (v6, gw, subnet, self.name))
- cmd = "ip link add {0} type bridge".format(self.name)
- cmd = "ovs-vsctl add-br {0}".format(self.name)
- raise ValueError('Unsupported br_type: %s' % self.br_type)
- self.delete()
- self.execute(cmd, sudo=True, retry=True)
- try_several_times(f)
- self.execute("ip link set up dev {0}".format(self.name),
- sudo=True, retry=True)
-
- ips = self.check_br_addr(self.name)
- self.execute(
- "ip addr del {0} dev {1}".format(ip, self.name),
- sudo=True, retry=True)
- self.execute(
- "ip addr add {0} dev {1}".format(self.ip_addr, self.name
),
- sudo=True, retry=True)
- self.ctns = []
-
- out = self.execute('docker network ls', sudo=True, retry=True)
- bridges = []
- bridges.append(line.split()[1])
- return bridges
-
- out = self.execute('brctl show', retry=True)
- bridges = []
- bridges.append(line.split()[0])
- return bridges
-
- out = self.execute('ovs-vsctl list-br', sudo=True, retry=True)
- return out.splitlines()
-
- return self.get_bridges_dc()
- return self.get_bridges_brctl()
- return self.get_bridges_ovs()
-
- return self.name in self.get_bridges()
-
- m = self.cmd.sudo
- m = self.cmd.execute
- return m(cmd, capture=capture, try_times=3, interval=1)
- return m(cmd, capture=capture)
-
- ips = {}
- cmd = "ip a show dev %s" % br
- elems = [e.strip() for e in line.strip().split(' ')]
- ips[4] = elems[1]
- elems = [e.strip() for e in line.strip().split(' ')]
- ips[6] = elems[1]
- return ips
-
- return "{0}/{1}".format(next(self._ip_generator),
- self.subnet.prefixlen)
-
- name = ctn.next_if_name()
- self.ctns.append(ctn)
- ip_address = None
- ipv4 = None
- ipv6 = None
- ip_address = self.next_ip_address()
- ip_address_ip = ip_address.split('/')[0]
- version = 4
- version = 6
- opt_ip = "--ip %s" % ip_address_ip
- ipv4 = ip_address
- opt_ip = "--ip6 %s" % ip_address_ip
- ipv6 = ip_address
- cmd = "docker network connect %s %s %s" % (
- opt_ip, self.name, ctn.docker_name())
- self.execute(cmd, sudo=True)
- ctn.set_addr_info(bridge=self.name, ipv4=ipv4, ipv6=ipv6,
- ifname=name)
- ip_address = self.next_ip_address()
- version = 4
- version = 6
- ctn.pipework(self, ip_address, name, version=version)
- ctn.pipework(self, '0/0', name)
- return ip_address
-
- return
- self.execute("docker network rm %s" % self.name,
- sudo=True, retry=True)
- self.execute("ip link set down dev %s" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ip link delete %s type bridge" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ovs-vsctl del-br %s" % self.name,
- sudo=True, retry=True)
-
-
- self.name = name
- self.image = image
- self.shared_volumes = []
- self.ip_addrs = []
- self.ip6_addrs = []
- self.is_running = False
- self.eths = []
- self.id = None
-
- self.cmd = Command()
- self.remove()
-
- return self.name
- return '{0}_{1}'.format(TEST_PREFIX, self.name)
-
- return self.id
- return self.docker_name()
-
- name = 'eth{0}'.format(len(self.eths) + 1)
- self.eths.append(name)
- return name
-
- self.ip_addrs.append((ifname, ipv4, bridge))
- self.ip6_addrs.append((ifname, ipv6, bridge))
-
- addrinfo = {}
- ip_addrs = self.ip_addrs
- ip_addrs = self.ip6_addrs
- return None
- addrinfo[addr[1]] = addr[0]
- return addrinfo
-
- m = self.cmd.sudo
- m = self.cmd.execute
- return m(cmd, capture=capture, try_times=3, interval=1)
- return m(cmd, capture=capture)
-
- return self.cmd.sudo(cmd, capture=capture, try_times=3, interval=1)
- return self.cmd.sudo(cmd, capture=capture)
-
- name = self.docker_name()
- flag = '-d' if detach else ''
- return self.dcexec('docker exec {0} {1} {2}'.format(
- flag, name, cmd), capture=capture)
-
- cmd = 'docker ps --no-trunc=true'
- cmd += ' --all=true'
- out = self.dcexec(cmd, retry=True)
- containers = []
- containers.append(line.split()[-1])
- return containers
-
- return self.docker_name() in self.get_containers(allctn=allctn)
-
- c = CmdBuffer(' ')
- c << "docker run --privileged=true"
- c << "-v {0}:{1}".format(sv[0], sv[1])
- c << "--name {0} --hostname {0} -id {1}".format(self.docker_name()
,
- self.image)
- self.id = self.dcexec(str(c), retry=True)
- self.is_running = True
- self.exec_on_ctn("ip li set up dev lo")
- ipv4 = None
- ipv6 = None
- elems = [e.strip() for e in line.strip().split(' ')]
- ipv4 = elems[1]
- elems = [e.strip() for e in line.strip().split(' ')]
- ipv6 = elems[1]
- self.set_addr_info(bridge='docker0', ipv4=ipv4, ipv6=ipv6,
- ifname='eth0')
- return 0
-
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- LOG.warning('Call run() before pipeworking')
- return
- c = CmdBuffer(' ')
- c << "pipework {0}".format(bridge.name)
-
- c << "-i {0}".format(intf_name)
- intf_name = "eth1"
- ipv4 = None
- ipv6 = None
- ipv4 = ip_addr
- c << '-a 6'
- ipv6 = ip_addr
- c << "{0} {1}".format(self.docker_name(), ip_addr)
- self.set_addr_info(bridge=bridge.name, ipv4=ipv4, ipv6=ipv6,
- ifname=intf_name)
- self.execute(str(c), sudo=True, retry=True)
-
- cmd = "docker inspect -f '{{.State.Pid}}' %s" %
self.docker_name()
- return int(self.dcexec(cmd))
- return -1
-
- interface = "eth0"
- filename = "{0}/{1}.dump".format(
- self.shared_volumes[0][1], interface)
- self.exec_on_ctn(
- "tcpdump -i {0} -w {1}".format(interface, filename),
- detach=True)
-
-
-
- WAIT_FOR_BOOT = 1
- RETRY_INTERVAL = 5
- DEFAULT_PEER_ARG = {'neigh_addr': '',
- 'passwd': None,
- 'vpn': False,
- 'flowspec': False,
- 'is_rs_client': False,
- 'is_rr_client': False,
- 'cluster_id': None,
- 'policies': None,
- 'passive': False,
- 'local_addr': '',
- 'as2': False,
- 'graceful_restart': None,
- 'local_as': None,
- 'prefix_limit': None}
- default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
- DEFAULT_ROUTE_ARG = {'prefix': None,
- 'rf': 'ipv4',
- 'attr': None,
- 'next-hop': None,
- 'as-path': None,
- 'community': None,
- 'med': None,
- 'local-pref': None,
- 'extended-community': None,
- 'matchs': None,
- 'thens': None}
- default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
-
- self.config_dir = TEST_BASE_DIR
- self.config_dir = os.path.join(self.config_dir, TEST_PREFIX)
- self.config_dir = os.path.join(self.config_dir, name)
- self.asn = asn
- self.router_id = router_id
- self.peers = {}
- self.routes = {}
- self.policies = {}
- super(BGPContainer, self).__init__(name, ctn_image_name)
- self.execute(
- 'rm -rf {0}'.format(self.config_dir), sudo=True)
- self.execute('mkdir -p {0}'.format(self.config_dir))
- self.execute('chmod 777 {0}'.format(self.config_dir))
-
- return str({'name': self.name, 'asn': self.asn,
- 'router_id': self.router_id})
-
- self.create_config()
- super(BGPContainer, self).run()
- time.sleep(w_time)
- return w_time
-
- def add_peer(self, peer, bridge='', reload_config=True, v6=False,
- peer_info = peer_info or {}
- self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
- self.peers[peer].update(peer_info)
- peer_keys = sorted(self.peers[peer].keys())
- raise Exception("argument error peer_info: %s" % peer_info)
-
- neigh_addr = ''
- local_addr = ''
- it = itertools.product(self.ip_addrs, peer.ip_addrs)
- it = itertools.product(self.ip6_addrs, peer.ip6_addrs)
-
- continue
- neigh_addr = you[1]
- local_addr = me[1]
- addr, mask = local_addr.split('/')
- local_addr = "{0}%{1}/{2}".format(addr, me[0], mask)
- break
-
- raise Exception('peer {0} seems not ip reachable'.format(peer))
-
- self.peers[peer]['policies'] = {}
-
- self.peers[peer]['neigh_addr'] = neigh_addr
- self.peers[peer]['local_addr'] = local_addr
- self.create_config()
- self.reload_config()
-
- del self.peers[peer]
- self.create_config()
- self.reload_config()
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- return self.execute('cat {0}/*.log'.format(self.config_dir))
-
- route_info = route_info or {}
- self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
- self.routes[route].update(route_info)
- route_keys = sorted(self.routes[route].keys())
- raise Exception("argument error route_info: %s" % route_info)
- self.routes[route]['prefix'] = route
- self.create_config()
- self.reload_config()
-
- def add_policy(self, policy, peer, typ, default='accept',
- self.set_default_policy(peer, typ, default)
- self.define_policy(policy)
- self.assign_policy(peer, policy, typ)
- self.create_config()
- self.reload_config()
-
- if (typ in ['in', 'out', 'import', 'export'] and
- self.peers[peer]['default-policy'] = {}
- self.peers[peer]['default-policy'][typ] = default
- raise Exception('wrong type or default')
-
- self.policies[policy['name']] = policy
-
- raise Exception('peer {0} not found'.format(peer.name))
- name = policy['name']
- raise Exception('policy {0} not found'.format(name))
- self.peers[peer]['policies'][typ] = policy
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- version = netaddr.IPNetwork(prefix).version
- addr = prefix.split('/')[0]
- ping_cmd = 'ping'
- ping_cmd = 'ping6'
- raise Exception(
- 'unsupported route family: {0}'.format(version))
- cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format(
- ping_cmd, addr)
- interval = 1
- count = 0
- res = self.exec_on_ctn(cmd)
- LOG.info(res)
- break
- time.sleep(interval)
- count += interval
- raise Exception('timeout')
- return True
-
- interval = 1
- count = 0
- state = self.get_neighbor_state(peer)
- LOG.info("%s's peer %s state: %s",
- self.router_id, peer.router_id, state)
- return
-
- time.sleep(interval)
- count += interval
- raise Exception('timeout')
-
- cmd = '/sbin/ip route add {0} via {1}'.format(network, next_hop)
- self.exec_on_ctn(cmd)
-
- cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
- self.exec_on_ctn(cmd)
-
- raise NotImplementedError()
-
- raise NotImplementedError()
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg.sh b/ryu/tests/integrated/common/install_docker_test_pkg.sh
deleted file mode 100644
index a771dfc..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-function add_docker_aptline {
- sudo apt-get update
- if ! apt-cache search docker-engine | grep docker-engine; then
- VER=`lsb_release -r`
- if echo $VER | grep 12.04; then
- REL_NAME=precise
- elif echo $VER | grep 14.04; then
- REL_NAME=trusty
- elif echo $VER | grep 15.10; then
- REL_NAME=wily
- elif echo $VER | grep 16.04; then
- REL_NAME=xenial
- else
- return 1
- fi
- RELEASE=ubuntu-$REL_NAME
- sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
- sudo sh -c "echo deb https://apt.dockerproject.org/repo $RELEASE main > /etc/apt/sources.list.d/docker.list"
- fi
-}
-
-init_variables
-
-if [ $APTLINE_DOCKER -eq 1 ]; then
- add_docker_aptline
-fi
-
-sudo apt-get update
-if apt-cache search docker-engine | grep docker-engine; then
- DOCKER_PKG=docker-engine
-else
- DOCKER_PKG=docker.io
-fi
-sudo apt-get install -y $DOCKER_PKG
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh b/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
deleted file mode 100644
index 44a3e10..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-set -ex
-
-function init_variables {
- APTLINE_DOCKER=0
- DIR_BASE=/tmp
-}
-
-function process_options {
- local max
- local i
- max=$#
- i=1
- while [ $i -le $max ]; do
- case "$1" in
- -a|--add-docker-aptline)
- APTLINE_DOCKER=1
- ;;
- -d|--download-dir)
- shift; ((i++))
- DIR_BASE=$1
- ;;
- esac
- shift; ((i++))
- done
-}
-
-function install_pipework {
- if ! which /usr/local/bin/pipework >/dev/null
- then
- sudo rm -rf $DIR_BASE/pipework
- git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework
- sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework
- fi
-}
-
-function install_depends_pkg {
- install_pipework
-}
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh b/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
deleted file mode 100644
index d8c3b49..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-init_variables
-
-sudo apt-get update
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/quagga.py b/ryu/tests/integrated/common/quagga.py
deleted file mode 100644
index 9b6d218..0000000
--- a/ryu/tests/integrated/common/quagga.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/quagga.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-
-import netaddr
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/quagga'
-
- super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
- self.zebra = zebra
- self._create_config_debian()
-
- w_time = super(QuaggaBGPContainer,
- self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- rib = []
- return self.get_global_rib_with_prefix(prefix, rf)
-
- out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
- return rib
-
- read_next = False
-
- ibgp = False
- line = line[2:]
- line = line[1:]
- ibgp = True
- continue
-
- elems = line.split()
-
- read_next = True
- prefix = elems[0]
- continue
- nexthop = elems[0]
- prefix = elems[0]
- nexthop = elems[1]
- read_next = False
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'ibgp': ibgp})
-
- return rib
-
- rib = []
-
- lines = [line.strip() for line in self.vtysh(
- 'show bgp {0} unicast {1}'.format(rf, prefix),
- config=False).split('\n')]
-
- return rib
-
- lines = lines[2:]
-
- lines.pop(0) # another useless line
- lines = lines[2:] # other useless lines
- raise Exception('unknown output format {0}'.format(lines))
-
- aspath = []
- aspath = [int(asn) for asn in lines[0].split()]
-
- nexthop = lines[1].split()[0].strip()
- info = [s.strip(',') for s in lines[2].split()]
- attrs = []
- med = info[info.index('metric') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
- 'metric': int(med)})
- localpref = info[info.index('localpref') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
- 'value': int(localpref)})
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'aspath': aspath, 'attrs': attrs})
-
- return rib
-
- raise Exception('not found peer {0}'.format(peer.router_id))
-
- neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
-
- info = [l.strip() for l in self.vtysh(
- 'show bgp neighbors {0}'.format(neigh_addr),
- config=False).split('\n')]
-
- raise Exception('unknown format')
-
- idx1 = info[0].index('BGP neighbor is ')
- idx2 = info[0].index(',')
- n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
- idx1 = info[2].index('= ')
- state = info[2][idx1 + len('= '):]
- return base.BGP_FSM_IDLE
- return base.BGP_FSM_ACTIVE
- return base.BGP_FSM_ESTABLISHED
- return state
-
- raise Exception('not found peer {0}'.format(peer.router_id))
-
- self.vtysh('clear ip bgp * soft', config=False)
-
- zebra = 'no'
- self._create_config_bgp()
- zebra = 'yes'
- self._create_config_zebra()
- self._create_config_daemons(zebra)
-
- c = base.CmdBuffer()
- c << 'vtysh_enable=yes'
- c << 'zebra_options=" --daemon -A 127.0.0.1"'
- c << 'bgpd_options=" --daemon -A 127.0.0.1"'
- c << 'ospfd_options=" --daemon -A 127.0.0.1"'
- c << 'ospf6d_options=" --daemon -A ::1"'
- c << 'ripd_options=" --daemon -A 127.0.0.1"'
- c << 'ripngd_options=" --daemon -A ::1"'
- c << 'isisd_options=" --daemon -A 127.0.0.1"'
- c << 'babeld_options=" --daemon -A 127.0.0.1"'
- c << 'watchquagga_enable=yes'
- c << 'watchquagga_options=(--daemon)'
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'zebra=%s' % zebra
- c << 'bgpd=yes'
- c << 'ospfd=no'
- c << 'ospf6d=no'
- c << 'ripd=no'
- c << 'ripngd=no'
- c << 'isisd=no'
- c << 'babeld=no'
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
-
- c = base.CmdBuffer()
- c << 'hostname bgpd'
- c << 'password zebra'
- c << 'router bgp {0}'.format(self.asn)
- c << 'bgp router-id {0}'.format(self.router_id)
- c << 'bgp graceful-restart'
-
- version = 4
- version = netaddr.IPNetwork(info['neigh_addr']).version
- n_addr = info['neigh_addr'].split('/')[0]
- c << 'no bgp default ipv4-unicast'
-
- c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
- c << 'neighbor {0} route-server-client'.format(n_addr)
- c << 'neighbor {0} route-map {1} {2}'.format(n_addr, p['name'],
- typ)
- c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
- c << 'neighbor {0} passive'.format(n_addr)
- c << 'address-family ipv6 unicast'
- c << 'neighbor {0} activate'.format(n_addr)
- c << 'exit-address-family'
-
- c << 'network {0}'.format(route['prefix'])
- c << 'address-family ipv6 unicast'
- c << 'network {0}'.format(route['prefix'])
- c << 'exit-address-family'
- raise Exception(
- 'unsupported route family: {0}'.format(route['rf']))
-
- c << 'address-family ipv6 unicast'
- c << 'redistribute connected'
- c << 'exit-address-family'
- c << 'redistribute connected'
-
- c << 'access-list {0} {1} {2}'.format(name, policy['type'],
- policy['match'])
- c << 'route-map {0} permit 10'.format(name)
- c << 'match ip address {0}'.format(name)
- c << 'set metric {0}'.format(policy['med'])
-
- c << 'debug bgp as4'
- c << 'debug bgp fsm'
- c << 'debug bgp updates'
- c << 'debug bgp events'
- c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
-
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'hostname zebra'
- c << 'password zebra'
- c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
- c << 'debug zebra packet'
- c << 'debug zebra kernel'
- c << 'debug zebra rib'
- c << ''
-
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- cmd = [cmd]
- cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
- return self.exec_on_ctn(
- "vtysh -d bgpd -c 'en' -c 'conf t' -c "
- "'router bgp {0}' {1}".format(self.asn, cmd),
- capture=True)
- return self.exec_on_ctn("vtysh -d bgpd {0}".format(cmd),
- capture=True)
-
- daemon = []
- daemon.append('bgpd')
- daemon.append('zebra')
- cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
- self.exec_on_ctn(cmd, capture=True)
-
-
- def __init__(self, name, config, ctn_image_name,
- asn = None
- router_id = None
- line = line.strip()
- asn = int(line[len('router bgp'):].strip())
- router_id = line[len('bgp router-id'):].strip()
- raise Exception('asn not in quagga config')
- raise Exception('router-id not in quagga config')
- self.config = config
- super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name, zebra)
-
- LOG.info("[%s's new config]", self.name)
- LOG.info(self.config)
- f.writelines(self.config)
diff --git a/ryu/tests/integrated/common/ryubgp.py b/ryu/tests/integrated/common/ryubgp.py
deleted file mode 100644
index 8fe16f4..0000000
--- a/ryu/tests/integrated/common/ryubgp.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-import time
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/ryu'
-
- super(RyuBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
- self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
- self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
- self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
-
- c = base.CmdBuffer()
- c << '[DEFAULT]'
- c << 'verbose=True'
- c << 'log_file=/etc/ryu/manager.log'
- LOG.info("[%s's new config]" % self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'import os'
- c << ''
- c << 'BGP = {'
- c << " 'local_as': %s," % str(self.asn)
- c << " 'router_id': '%s'," % self.router_id
- c << " 'neighbors': ["
- c << " {"
- n_addr = info['neigh_addr'].split('/')[0]
- c << " 'address': '%s'," % n_addr
- c << " 'remote_as': %s," % str(peer.asn)
- c << " 'enable_ipv4': True,"
- c << " 'enable_ipv6': True,"
- c << " 'enable_vpnv4': True,"
- c << " 'enable_vpnv6': True,"
- c << ' },'
- c << ' ],'
- c << " 'routes': ["
- c << " {"
- c << " 'prefix': '%s'," % route['prefix']
- c << " },"
- c << " ],"
- c << "}"
- log_conf = """LOGGING = {
-
- # We use python logging package for logging.
- 'version': 1,
- 'disable_existing_loggers': False,
-
- 'formatters': {
- 'verbose': {
- 'format': '%(levelname)s %(asctime)s %(module)s ' +
- '[%(process)d %(thread)d] %(message)s'
- },
- 'simple': {
- 'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
- '%(message)s'
- },
- 'stats': {
- 'format': '%(message)s'
- },
- },
-
- 'handlers': {
- # Outputs log to console.
- 'console': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'simple'
- },
- 'console_stats': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'stats'
- },
- # Rotates log file when its size reaches 10MB.
- 'log_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'bgpspeaker.log'),
- 'maxBytes': '10000000',
- 'formatter': 'verbose'
- },
- 'stats_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'statistics_bgps.log'),
- 'maxBytes': '10000000',
- 'formatter': 'stats'
- },
- },
-
- # Fine-grained control of logging per instance.
- 'loggers': {
- 'bgpspeaker': {
- 'handlers': ['console', 'log_file'],
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'stats': {
- 'handlers': ['stats_file', 'console_stats'],
- 'level': 'INFO',
- 'propagate': False,
- 'formatter': 'stats',
- },
- },
-
- # Root loggers.
- 'root': {
- 'handlers': ['console', 'log_file'],
- 'level': 'DEBUG',
- 'propagate': True,
- },
-}"""
- c << log_conf
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- self._create_config_ryu()
- self._create_config_ryu_bgp()
-
- results = self.exec_on_ctn('ps ax')
- running = False
- running = True
- return running
-
- return True
- result = False
- try_times = 3
- try_times = 1
- cmd = "ryu-manager --verbose "
- cmd += "--config-file %s " % self.SHARED_RYU_CONF
- cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
- cmd += "ryu.services.protocols.bgp.application"
- self.exec_on_ctn(cmd, detach=True)
- result = True
- break
- time.sleep(1)
- return result
-
- return True
- result = False
- try_times = 3
- try_times = 1
- cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
- self.exec_on_ctn(cmd)
- result = True
- break
- time.sleep(1)
- return result
-
- w_time = super(RyuBGPContainer,
- self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- self.stop_ryubgp(retry=True)
- self.start_ryubgp(retry=True)
diff --git a/tests/integrated/bgp/base.py b/tests/integrated/bgp/base.py
index 26fa396..2f210de 100644
--- a/tests/integrated/bgp/base.py
+++ b/tests/integrated/bgp/base.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest
-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga
LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/base_ip6.py b/tests/integrated/bgp/base_ip6.py
index be26faf..d867920 100644
--- a/tests/integrated/bgp/base_ip6.py
+++ b/tests/integrated/bgp/base_ip6.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest
-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga
LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/test_basic.py b/tests/integrated/bgp/test_basic.py
index 5817d44..d1eda39 100644
--- a/tests/integrated/bgp/test_basic.py
+++ b/tests/integrated/bgp/test_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import time
-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base
from . import base
diff --git a/tests/integrated/bgp/test_ip6_basic.py b/tests/integrated/bgp/test_ip6_basic.py
index 40461a5..911a0b5 100644
--- a/tests/integrated/bgp/test_ip6_basic.py
+++ b/tests/integrated/bgp/test_ip6_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import time
-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base
from . import base_ip6 as base
--
2.7.4
fumihiko kakuma
2017-07-12 22:04:40 UTC
Permalink
Hi Yamamoto-San,

On Wed, 12 Jul 2017 23:43:25 +0900
Post by Takashi YAMAMOTO
this breaks openstack neutron_dynamic_routing, doesn't it?
Thank you for the advice; I am aware of that.
As soon as the ryu version containing this commit is released,
I will push a patch to neutron-dynamic-routing to fix the breakage.
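
For reference, the change on the neutron-dynamic-routing side should amount
to little more than updating the import paths. A rough sketch of a
compatibility shim (only the ryu paths below come from this patch; how
neutron-dynamic-routing actually wires them in is illustrative):

    # Hypothetical shim for the neutron-dynamic-routing functional tests.
    # Try the new ryu.lib.docker location first, and fall back to the old
    # ryu.tests.integrated.common location for ryu releases that predate
    # this commit.
    try:
        from ryu.lib.docker import docker_base as ctn_base
        from ryu.lib.docker import quagga
        from ryu.lib.docker import ryubgp
    except ImportError:
        from ryu.tests.integrated.common import docker_base as ctn_base
        from ryu.tests.integrated.common import quagga
        from ryu.tests.integrated.common import ryubgp

Once the fixed ryu release becomes the minimum requirement, the fallback
branch can simply be dropped.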


Thanks,
Kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
This patch requires the v2 patch of Iwase-San.
---
.travis.yml | 2 +-
ryu/lib/docker/__init__.py | 0
ryu/lib/docker/docker_base.py | 801 +++++++++++++++++++++
ryu/lib/docker/install_docker_test_pkg.sh | 43 ++
ryu/lib/docker/install_docker_test_pkg_common.sh | 39 +
.../docker/install_docker_test_pkg_for_travis.sh | 12 +
ryu/lib/docker/quagga.py | 332 +++++++++
ryu/lib/docker/ryubgp.py | 212 ++++++
ryu/tests/integrated/common/__init__.py | 0
ryu/tests/integrated/common/docker_base.py | 801 ---------------------
.../integrated/common/install_docker_test_pkg.sh | 43 --
.../common/install_docker_test_pkg_common.sh | 39 -
.../common/install_docker_test_pkg_for_travis.sh | 12 -
ryu/tests/integrated/common/quagga.py | 332 ---------
ryu/tests/integrated/common/ryubgp.py | 212 ------
tests/integrated/bgp/base.py | 6 +-
tests/integrated/bgp/base_ip6.py | 6 +-
tests/integrated/bgp/test_basic.py | 2 +-
tests/integrated/bgp/test_ip6_basic.py | 2 +-
19 files changed, 1448 insertions(+), 1448 deletions(-)
create mode 100644 ryu/lib/docker/__init__.py
create mode 100644 ryu/lib/docker/docker_base.py
create mode 100644 ryu/lib/docker/install_docker_test_pkg.sh
create mode 100644 ryu/lib/docker/install_docker_test_pkg_common.sh
create mode 100644 ryu/lib/docker/install_docker_test_pkg_for_travis.sh
create mode 100644 ryu/lib/docker/quagga.py
create mode 100644 ryu/lib/docker/ryubgp.py
delete mode 100644 ryu/tests/integrated/common/__init__.py
delete mode 100644 ryu/tests/integrated/common/docker_base.py
delete mode 100644 ryu/tests/integrated/common/install_docker_test_pkg.sh
delete mode 100644 ryu/tests/integrated/common/install_docker_test_pkg_common.sh
delete mode 100644 ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
delete mode 100644 ryu/tests/integrated/common/quagga.py
delete mode 100644 ryu/tests/integrated/common/ryubgp.py
diff --git a/.travis.yml b/.travis.yml
index 9e5474a..cd35aac 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,7 +16,7 @@ sudo: required # Required to enable Docker service
- pip install tox coveralls
- - bash ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
+ - bash ryu/lib/docker/install_docker_test_pkg_for_travis.sh
- NOSE_VERBOSE=0 tox -e $TOX_ENV
diff --git a/ryu/lib/docker/__init__.py b/ryu/lib/docker/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ryu/lib/docker/docker_base.py b/ryu/lib/docker/docker_base.py
new file mode 100644
index 0000000..1ae2cc2
--- /dev/null
+++ b/ryu/lib/docker/docker_base.py
@@ -0,0 +1,801 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/base.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import itertools
+import logging
+import os
+import subprocess
+import time
+
+import netaddr
+import six
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_TEST_PREFIX = ''
+DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
+TEST_PREFIX = DEFAULT_TEST_PREFIX
+TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
+
+BGP_FSM_IDLE = 'BGP_FSM_IDLE'
+BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
+BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
+
+BGP_ATTR_TYPE_ORIGIN = 1
+BGP_ATTR_TYPE_AS_PATH = 2
+BGP_ATTR_TYPE_NEXT_HOP = 3
+BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
+BGP_ATTR_TYPE_LOCAL_PREF = 5
+BGP_ATTR_TYPE_COMMUNITIES = 8
+BGP_ATTR_TYPE_ORIGINATOR_ID = 9
+BGP_ATTR_TYPE_CLUSTER_LIST = 10
+BGP_ATTR_TYPE_MP_REACH_NLRI = 14
+BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
+
+BRIDGE_TYPE_DOCKER = 'docker'
+BRIDGE_TYPE_BRCTL = 'brctl'
+BRIDGE_TYPE_OVS = 'ovs'
+
+
+ super(CommandError, self).__init__()
+ self.out = out
+
+
+ e = RuntimeError()
+ r = f()
+ time.sleep(s)
+ return r
+ raise e
+
+
+ super(CmdBuffer, self).__init__()
+ self.delim = delim
+
+ self.append(value)
+
+ return self.delim.join(self)
+
+
+
+ stdout = stdout or ''
+ obj = super(CommandOut, cls).__new__(cls, stdout, **kwargs)
+ obj.stderr = stderr or ''
+ obj.command = command
+ obj.returncode = returncode
+ return obj
+
+
+
+ """Execute a command using subprocess.Popen()
+ - out: stdout from subprocess.Popen()
+ out has some attributes.
+ out.returncode: returncode of subprocess.Popen()
+ out.stderr: stderr from subprocess.Popen()
+ """
+ p_stdout = subprocess.PIPE
+ p_stderr = subprocess.PIPE
+ p_stdout = None
+ p_stderr = None
+ pop = subprocess.Popen(cmd, shell=True, executable=executable,
+ stdout=p_stdout,
+ stderr=p_stderr)
+ __stdout, __stderr = pop.communicate()
+ _stdout = six.text_type(__stdout, 'utf-8')
+ _stderr = six.text_type(__stderr, 'utf-8')
+ out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
+ return out
+
+ out = None
+ out = self._execute(cmd, capture=capture)
+ LOG.info(out.command)
+ return out
+ LOG.error("stdout: %s", out)
+ LOG.error("stderr: %s", out.stderr)
+ break
+ time.sleep(interval)
+ raise CommandError(out)
+
+ cmd = 'sudo %s' % cmd
+ return self.execute(cmd, capture=capture,
+ try_times=try_times, interval=interval)
+
+
+ self.baseimage = baseimage
+ self.cmd = Command()
+
+ out = self.cmd.sudo('sudo docker images')
+ images = []
+ images.append(line.split()[0])
+ return images
+
+ return name in self.get_images()
+
+ self.cmd.sudo(
+ "docker build -t {0} {1}".format(tagname, dockerfile_dir),
+ try_times=3)
+
+ return tagname
+ self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
+
+ def create_quagga(self, tagname='quagga', image=None,
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ pkges = ' '.join([
+ 'telnet',
+ 'tcpdump',
+ 'quagga',
+ ])
+ use_image = image
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'RUN apt-get update'
+ c << 'RUN apt-get install -qy --no-install-recommends %s' % pkges
+ c << 'CMD /usr/lib/quagga/bgpd'
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
+ self.build(tagname, workdir)
+ return tagname
+
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ workdir_ctn = '/root/osrg/ryu'
+ pkges = ' '.join([
+ 'tcpdump',
+ 'iproute2',
+ ])
+ use_image = image
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'ADD ryu %s' % workdir_ctn
+ install = ' '.join([
+ 'RUN apt-get update',
+ '&& apt-get install -qy --no-install-recommends %s' % pkges,
+ '&& cd %s' % workdir_ctn,
+ # Note: Clean previous builds, because "python setup.py install"
+ # might fail if the current directory contains the symlink to
+ # Docker host file systems.
+ '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
+ '&& pip install -r tools/pip-requires -r tools/optional-requires',
+ '&& python setup.py install',
+ ])
+ c << install
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
+ self.cmd.execute('cp -r ../ryu %s/' % workdir)
+ self.build(tagname, workdir)
+ return tagname
+
+
+ def __init__(self, name, subnet='', start_ip=None, end_ip=None,
+ with_ip=True, self_ip=False,
+ fixed_ip=None, reuse=False,
+ """Manage a bridge
+ - name: bridge name
+ - subnet: network CIDR to be used in this bridge
+ - start_ip: start address of an ip to be used in the subnet
+ - end_ip: end address of an ip to be used in the subnet
+ - with_ip: specify if assign automatically an ip address
+ - self_ip: specify if assign an ip address for the bridge
+ - fixed_ip: an ip address to be assigned to the bridge
+ - reuse: specify if use an existing bridge
+ - br_type: One either in a 'docker', 'brctl' or 'ovs'
+ """
+ self.cmd = Command()
+ self.name = name
+ if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
+ raise Exception("argument error br_type: %s" % br_type)
+ self.br_type = br_type
+ self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
+ self.name = '{0}_{1}'.format(TEST_PREFIX, name)
+ self.with_ip = with_ip
+ self.subnet = netaddr.IPNetwork(subnet)
+ self.start_ip = start_ip
+ self.start_ip = netaddr.IPAddress(self.subnet.first)
+ self.end_ip = end_ip
+ self.end_ip = netaddr.IPAddress(self.subnet.last)
+
+ yield host
+ self._ip_generator = _ip_gen()
+ # throw away first network address
+ self.next_ip_address()
+
+ self.self_ip = self_ip
+ self.ip_addr = fixed_ip
+ self.ip_addr = self.next_ip_address()
+ gw = "--gateway %s" % self.ip_addr.split('/')[0]
+ v6 = ''
+ v6 = '--ipv6'
+ cmd = ("docker network create --driver bridge %s "
+ "%s --subnet %s %s" % (v6, gw, subnet, self.name))
+ cmd = "ip link add {0} type bridge".format(self.name)
+ cmd = "ovs-vsctl add-br {0}".format(self.name)
+ raise ValueError('Unsupported br_type: %s' % self.br_type)
+ self.delete()
+ self.execute(cmd, sudo=True, retry=True)
+ try_several_times(f)
+ self.execute("ip link set up dev {0}".format(self.name),
+ sudo=True, retry=True)
+
+ ips = self.check_br_addr(self.name)
+ self.execute(
+ "ip addr del {0} dev {1}".format(ip, self.name),
+ sudo=True, retry=True)
+ self.execute(
+ "ip addr add {0} dev {1}".format(self.ip_addr, self.name
),
+ sudo=True, retry=True)
+ self.ctns = []
+
+ out = self.execute('docker network ls', sudo=True, retry=True)
+ bridges = []
+ bridges.append(line.split()[1])
+ return bridges
+
+ out = self.execute('brctl show', retry=True)
+ bridges = []
+ bridges.append(line.split()[0])
+ return bridges
+
+ out = self.execute('ovs-vsctl list-br', sudo=True, retry=True)
+ return out.splitlines()
+
+ return self.get_bridges_dc()
+ return self.get_bridges_brctl()
+ return self.get_bridges_ovs()
+
+ return self.name in self.get_bridges()
+
+ m = self.cmd.sudo
+ m = self.cmd.execute
+ return m(cmd, capture=capture, try_times=3, interval=1)
+ return m(cmd, capture=capture)
+
+ ips = {}
+ cmd = "ip a show dev %s" % br
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ips[4] = elems[1]
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ips[6] = elems[1]
+ return ips
+
+ return "{0}/{1}".format(next(self._ip_generator),
+ self.subnet.prefixlen)
+
+ name = ctn.next_if_name()
+ self.ctns.append(ctn)
+ ip_address = None
+ ipv4 = None
+ ipv6 = None
+ ip_address = self.next_ip_address()
+ ip_address_ip = ip_address.split('/')[0]
+ version = 4
+ version = 6
+ opt_ip = "--ip %s" % ip_address_ip
+ ipv4 = ip_address
+ opt_ip = "--ip6 %s" % ip_address_ip
+ ipv6 = ip_address
+ cmd = "docker network connect %s %s %s" % (
+ opt_ip, self.name, ctn.docker_name())
+ self.execute(cmd, sudo=True)
+ ctn.set_addr_info(bridge=self.name, ipv4=ipv4, ipv6=ipv6,
+ ifname=name)
+ ip_address = self.next_ip_address()
+ version = 4
+ version = 6
+ ctn.pipework(self, ip_address, name, version=version)
+ ctn.pipework(self, '0/0', name)
+ return ip_address
+
+ return
+ self.execute("docker network rm %s" % self.name,
+ sudo=True, retry=True)
+ self.execute("ip link set down dev %s" % self.name,
+ sudo=True, retry=True)
+ self.execute(
+ "ip link delete %s type bridge" % self.name,
+ sudo=True, retry=True)
+ self.execute(
+ "ovs-vsctl del-br %s" % self.name,
+ sudo=True, retry=True)
+
+
+ self.name = name
+ self.image = image
+ self.shared_volumes = []
+ self.ip_addrs = []
+ self.ip6_addrs = []
+ self.is_running = False
+ self.eths = []
+ self.id = None
+
+ self.cmd = Command()
+ self.remove()
+
+ return self.name
+ return '{0}_{1}'.format(TEST_PREFIX, self.name)
+
+ return self.id
+ return self.docker_name()
+
+ name = 'eth{0}'.format(len(self.eths) + 1)
+ self.eths.append(name)
+ return name
+
+ self.ip_addrs.append((ifname, ipv4, bridge))
+ self.ip6_addrs.append((ifname, ipv6, bridge))
+
+ addrinfo = {}
+ ip_addrs = self.ip_addrs
+ ip_addrs = self.ip6_addrs
+ return None
+ addrinfo[addr[1]] = addr[0]
+ return addrinfo
+
+ m = self.cmd.sudo
+ m = self.cmd.execute
+ return m(cmd, capture=capture, try_times=3, interval=1)
+ return m(cmd, capture=capture)
+
+ return self.cmd.sudo(cmd, capture=capture, try_times=3, interval=1)
+ return self.cmd.sudo(cmd, capture=capture)
+
+ name = self.docker_name()
+ flag = '-d' if detach else ''
+ return self.dcexec('docker exec {0} {1} {2}'.format(
+ flag, name, cmd), capture=capture)
+
+ cmd = 'docker ps --no-trunc=true'
+ cmd += ' --all=true'
+ out = self.dcexec(cmd, retry=True)
+ containers = []
+ containers.append(line.split()[-1])
+ return containers
+
+ return self.docker_name() in self.get_containers(allctn=allctn)
+
+ c = CmdBuffer(' ')
+ c << "docker run --privileged=true"
+ c << "-v {0}:{1}".format(sv[0], sv[1])
+ c << "--name {0} --hostname {0} -id {1}".format(self.docker_name()
,
+ self.image)
+ self.id = self.dcexec(str(c), retry=True)
+ self.is_running = True
+ self.exec_on_ctn("ip li set up dev lo")
+ ipv4 = None
+ ipv6 = None
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ipv4 = elems[1]
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ipv6 = elems[1]
+ self.set_addr_info(bridge='docker0', ipv4=ipv4, ipv6=ipv6,
+ ifname='eth0')
+ return 0
+
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True)
+ self.is_running = False
+ return out
+
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
+ self.is_running = False
+ return out
+
+ LOG.warning('Call run() before pipeworking')
+ return
+ c = CmdBuffer(' ')
+ c << "pipework {0}".format(bridge.name)
+
+ c << "-i {0}".format(intf_name)
+ intf_name = "eth1"
+ ipv4 = None
+ ipv6 = None
+ ipv4 = ip_addr
+ c << '-a 6'
+ ipv6 = ip_addr
+ c << "{0} {1}".format(self.docker_name(), ip_addr)
+ self.set_addr_info(bridge=bridge.name, ipv4=ipv4, ipv6=ipv6,
+ ifname=intf_name)
+ self.execute(str(c), sudo=True, retry=True)
+
+ cmd = "docker inspect -f '{{.State.Pid}}' %s" %
self.docker_name()
+ return int(self.dcexec(cmd))
+ return -1
+
+ interface = "eth0"
+ filename = "{0}/{1}.dump".format(
+ self.shared_volumes[0][1], interface)
+ self.exec_on_ctn(
+ "tcpdump -i {0} -w {1}".format(interface, filename),
+ detach=True)
+
+
+
+ WAIT_FOR_BOOT = 1
+ RETRY_INTERVAL = 5
+ DEFAULT_PEER_ARG = {'neigh_addr': '',
+ 'passwd': None,
+ 'vpn': False,
+ 'flowspec': False,
+ 'is_rs_client': False,
+ 'is_rr_client': False,
+ 'cluster_id': None,
+ 'policies': None,
+ 'passive': False,
+ 'local_addr': '',
+ 'as2': False,
+ 'graceful_restart': None,
+ 'local_as': None,
+ 'prefix_limit': None}
+ default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
+ DEFAULT_ROUTE_ARG = {'prefix': None,
+ 'rf': 'ipv4',
+ 'attr': None,
+ 'next-hop': None,
+ 'as-path': None,
+ 'community': None,
+ 'med': None,
+ 'local-pref': None,
+ 'extended-community': None,
+ 'matchs': None,
+ 'thens': None}
+ default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
+
+ self.config_dir = TEST_BASE_DIR
+ self.config_dir = os.path.join(self.config_dir, TEST_PREFIX)
+ self.config_dir = os.path.join(self.config_dir, name)
+ self.asn = asn
+ self.router_id = router_id
+ self.peers = {}
+ self.routes = {}
+ self.policies = {}
+ super(BGPContainer, self).__init__(name, ctn_image_name)
+ self.execute(
+ 'rm -rf {0}'.format(self.config_dir), sudo=True)
+ self.execute('mkdir -p {0}'.format(self.config_dir))
+ self.execute('chmod 777 {0}'.format(self.config_dir))
+
+ return str({'name': self.name, 'asn': self.asn,
+ 'router_id': self.router_id})
+
+ self.create_config()
+ super(BGPContainer, self).run()
+ time.sleep(w_time)
+ return w_time
+
+ def add_peer(self, peer, bridge='', reload_config=True, v6=False,
+ peer_info = peer_info or {}
+ self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
+ self.peers[peer].update(peer_info)
+ peer_keys = sorted(self.peers[peer].keys())
+ raise Exception("argument error peer_info: %s" % peer_info)
+
+ neigh_addr = ''
+ local_addr = ''
+ it = itertools.product(self.ip_addrs, peer.ip_addrs)
+ it = itertools.product(self.ip6_addrs, peer.ip6_addrs)
+
+ continue
+ neigh_addr = you[1]
+ local_addr = me[1]
+ addr, mask = local_addr.split('/')
+ local_addr = "{0}%{1}/{2}".format(addr, me[0], mask)
+ break
+
+ raise Exception('peer {0} seems not ip reachable'.format(peer))
+
+ self.peers[peer]['policies'] = {}
+
+ self.peers[peer]['neigh_addr'] = neigh_addr
+ self.peers[peer]['local_addr'] = local_addr
+ self.create_config()
+ self.reload_config()
+
+ del self.peers[peer]
+ self.create_config()
+ self.reload_config()
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ return self.execute('cat {0}/*.log'.format(self.config_dir))
+
+ route_info = route_info or {}
+ self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
+ self.routes[route].update(route_info)
+ route_keys = sorted(self.routes[route].keys())
+ raise Exception("argument error route_info: %s" % route_info)
+ self.routes[route]['prefix'] = route
+ self.create_config()
+ self.reload_config()
+
+ def add_policy(self, policy, peer, typ, default='accept',
+ self.set_default_policy(peer, typ, default)
+ self.define_policy(policy)
+ self.assign_policy(peer, policy, typ)
+ self.create_config()
+ self.reload_config()
+
+ if (typ in ['in', 'out', 'import', 'export'] and
+ self.peers[peer]['default-policy'] = {}
+ self.peers[peer]['default-policy'][typ] = default
+ raise Exception('wrong type or default')
+
+ self.policies[policy['name']] = policy
+
+ raise Exception('peer {0} not found'.format(peer.name))
+ name = policy['name']
+ raise Exception('policy {0} not found'.format(name))
+ self.peers[peer]['policies'][typ] = policy
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ version = netaddr.IPNetwork(prefix).version
+ addr = prefix.split('/')[0]
+ ping_cmd = 'ping'
+ ping_cmd = 'ping6'
+ raise Exception(
+ 'unsupported route family: {0}'.format(version))
+ cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format(
+ ping_cmd, addr)
+ interval = 1
+ count = 0
+ res = self.exec_on_ctn(cmd)
+ LOG.info(res)
+ break
+ time.sleep(interval)
+ count += interval
+ raise Exception('timeout')
+ return True
+
+ interval = 1
+ count = 0
+ state = self.get_neighbor_state(peer)
+ LOG.info("%s's peer %s state: %s",
+ self.router_id, peer.router_id, state)
+ return
+
+ time.sleep(interval)
+ count += interval
+ raise Exception('timeout')
+
+ cmd = '/sbin/ip route add {0} via {1}'.format(network, next_hop)
+ self.exec_on_ctn(cmd)
+
+ cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
+ self.exec_on_ctn(cmd)
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
diff --git a/ryu/lib/docker/install_docker_test_pkg.sh b/ryu/lib/docker/install_docker_test_pkg.sh
new file mode 100644
index 0000000..a771dfc
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+function add_docker_aptline {
+ sudo apt-get update
+ if ! apt-cache search docker-engine | grep docker-engine; then
+ VER=`lsb_release -r`
+ if echo $VER | grep 12.04; then
+ REL_NAME=precise
+ elif echo $VER | grep 14.04; then
+ REL_NAME=trusty
+ elif echo $VER | grep 15.10; then
+ REL_NAME=wily
+ elif echo $VER | grep 16.04; then
+ REL_NAME=xenial
+ else
+ return 1
+ fi
+ RELEASE=ubuntu-$REL_NAME
+ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+ sudo sh -c "echo deb https://apt.dockerproject.org/repo $RELEASE main > /etc/apt/sources.list.d/docker.list"
+ fi
+}
+
+init_variables
+
+if [ $APTLINE_DOCKER -eq 1 ]; then
+ add_docker_aptline
+fi
+
+sudo apt-get update
+if apt-cache search docker-engine | grep docker-engine; then
+ DOCKER_PKG=docker-engine
+else
+ DOCKER_PKG=docker.io
+fi
+sudo apt-get install -y $DOCKER_PKG
+install_depends_pkg
diff --git a/ryu/lib/docker/install_docker_test_pkg_common.sh b/ryu/lib/docker/install_docker_test_pkg_common.sh
new file mode 100644
index 0000000..44a3e10
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg_common.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+set -ex
+
+function init_variables {
+ APTLINE_DOCKER=0
+ DIR_BASE=/tmp
+}
+
+function process_options {
+ local max
+ local i
+ max=$#
+ i=1
+ while [ $i -le $max ]; do
+ case "$1" in
+ -a|--add-docker-aptline)
+ APTLINE_DOCKER=1
+ ;;
+ -d|--download-dir)
+ shift; ((i++))
+ DIR_BASE=$1
+ ;;
+ esac
+ shift; ((i++))
+ done
+}
+
+function install_pipework {
+ if ! which /usr/local/bin/pipework >/dev/null
+ then
+ sudo rm -rf $DIR_BASE/pipework
+ git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework
+ sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework
+ fi
+}
+
+function install_depends_pkg {
+ install_pipework
+}
diff --git a/ryu/lib/docker/install_docker_test_pkg_for_travis.sh b/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
new file mode 100644
index 0000000..d8c3b49
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+init_variables
+
+sudo apt-get update
+install_depends_pkg
diff --git a/ryu/lib/docker/quagga.py b/ryu/lib/docker/quagga.py
new file mode 100644
index 0000000..9b6d218
--- /dev/null
+++ b/ryu/lib/docker/quagga.py
@@ -0,0 +1,332 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/quagga.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+
+import netaddr
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
+
+ WAIT_FOR_BOOT = 1
+ SHARED_VOLUME = '/etc/quagga'
+
+ super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name)
+ self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
+ self.zebra = zebra
+ self._create_config_debian()
+
+ w_time = super(QuaggaBGPContainer,
+ self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
+ return w_time
+
+ rib = []
+ return self.get_global_rib_with_prefix(prefix, rf)
+
+ out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
+ return rib
+
+ read_next = False
+
+ ibgp = False
+ line = line[2:]
+ line = line[1:]
+ ibgp = True
+ continue
+
+ elems = line.split()
+
+ read_next = True
+ prefix = elems[0]
+ continue
+ nexthop = elems[0]
+ prefix = elems[0]
+ nexthop = elems[1]
+ read_next = False
+
+ rib.append({'prefix': prefix, 'nexthop': nexthop,
+ 'ibgp': ibgp})
+
+ return rib
+
+ rib = []
+
+ lines = [line.strip() for line in self.vtysh(
+ 'show bgp {0} unicast {1}'.format(rf, prefix),
+ config=False).split('\n')]
+
+ return rib
+
+ lines = lines[2:]
+
+ lines.pop(0) # another useless line
+ lines = lines[2:] # other useless lines
+ raise Exception('unknown output format {0}'.format(lines))
+
+ aspath = []
+ aspath = [int(asn) for asn in lines[0].split()]
+
+ nexthop = lines[1].split()[0].strip()
+ info = [s.strip(',') for s in lines[2].split()]
+ attrs = []
+ med = info[info.index('metric') + 1]
+ attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
+ 'metric': int(med)})
+ localpref = info[info.index('localpref') + 1]
+ attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
+ 'value': int(localpref)})
+
+ rib.append({'prefix': prefix, 'nexthop': nexthop,
+ 'aspath': aspath, 'attrs': attrs})
+
+ return rib
+
+ raise Exception('not found peer {0}'.format(peer.router_id))
+
+ neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
+
+ info = [l.strip() for l in self.vtysh(
+ 'show bgp neighbors {0}'.format(neigh_addr),
+ config=False).split('\n')]
+
+ raise Exception('unknown format')
+
+ idx1 = info[0].index('BGP neighbor is ')
+ idx2 = info[0].index(',')
+ n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
+ idx1 = info[2].index('= ')
+ state = info[2][idx1 + len('= '):]
+ return base.BGP_FSM_IDLE
+ return base.BGP_FSM_ACTIVE
+ return base.BGP_FSM_ESTABLISHED
+ return state
+
+ raise Exception('not found peer {0}'.format(peer.router_id))
+
+ self.vtysh('clear ip bgp * soft', config=False)
+
+ zebra = 'no'
+ self._create_config_bgp()
+ zebra = 'yes'
+ self._create_config_zebra()
+ self._create_config_daemons(zebra)
+
+ c = base.CmdBuffer()
+ c << 'vtysh_enable=yes'
+ c << 'zebra_options=" --daemon -A 127.0.0.1"'
+ c << 'bgpd_options=" --daemon -A 127.0.0.1"'
+ c << 'ospfd_options=" --daemon -A 127.0.0.1"'
+ c << 'ospf6d_options=" --daemon -A ::1"'
+ c << 'ripd_options=" --daemon -A 127.0.0.1"'
+ c << 'ripngd_options=" --daemon -A ::1"'
+ c << 'isisd_options=" --daemon -A 127.0.0.1"'
+ c << 'babeld_options=" --daemon -A 127.0.0.1"'
+ c << 'watchquagga_enable=yes'
+ c << 'watchquagga_options=(--daemon)'
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'zebra=%s' % zebra
+ c << 'bgpd=yes'
+ c << 'ospfd=no'
+ c << 'ospf6d=no'
+ c << 'ripd=no'
+ c << 'ripngd=no'
+ c << 'isisd=no'
+ c << 'babeld=no'
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+
+ c = base.CmdBuffer()
+ c << 'hostname bgpd'
+ c << 'password zebra'
+ c << 'router bgp {0}'.format(self.asn)
+ c << 'bgp router-id {0}'.format(self.router_id)
+ c << 'bgp graceful-restart'
+
+ version = 4
+ version = netaddr.IPNetwork(info['neigh_addr']).version
+ n_addr = info['neigh_addr'].split('/')[0]
+ c << 'no bgp default ipv4-unicast'
+
+ c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
+ c << 'neighbor {0} route-server-client'.format(n_addr)
+ c << 'neighbor {0} route-map {1} {2}'.format(n_addr, p['name'],
+ typ)
+ c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
+ c << 'neighbor {0} passive'.format(n_addr)
+ c << 'address-family ipv6 unicast'
+ c << 'neighbor {0} activate'.format(n_addr)
+ c << 'exit-address-family'
+
+ c << 'network {0}'.format(route['prefix'])
+ c << 'address-family ipv6 unicast'
+ c << 'network {0}'.format(route['prefix'])
+ c << 'exit-address-family'
+ raise Exception(
+ 'unsupported route family: {0}'.format(route['rf']))
+
+ c << 'address-family ipv6 unicast'
+ c << 'redistribute connected'
+ c << 'exit-address-family'
+ c << 'redistribute connected'
+
+ c << 'access-list {0} {1} {2}'.format(name, policy['type'],
+ policy['match'])
+ c << 'route-map {0} permit 10'.format(name)
+ c << 'match ip address {0}'.format(name)
+ c << 'set metric {0}'.format(policy['med'])
+
+ c << 'debug bgp as4'
+ c << 'debug bgp fsm'
+ c << 'debug bgp updates'
+ c << 'debug bgp events'
+ c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
+
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'hostname zebra'
+ c << 'password zebra'
+ c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
+ c << 'debug zebra packet'
+ c << 'debug zebra kernel'
+ c << 'debug zebra rib'
+ c << ''
+
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ cmd = [cmd]
+ cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
+ return self.exec_on_ctn(
+ "vtysh -d bgpd -c 'en' -c 'conf t' -c "
+ "'router bgp {0}' {1}".format(self.asn, cmd),
+ capture=True)
+ return self.exec_on_ctn("vtysh -d bgpd {0}".format(cmd),
+ capture=True)
+
+ daemon = []
+ daemon.append('bgpd')
+ daemon.append('zebra')
+ cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
+ self.exec_on_ctn(cmd, capture=True)
+
+
+ def __init__(self, name, config, ctn_image_name,
+ asn = None
+ router_id = None
+ line = line.strip()
+ asn = int(line[len('router bgp'):].strip())
+ router_id = line[len('bgp router-id'):].strip()
+ raise Exception('asn not in quagga config')
+ raise Exception('router-id not in quagga config')
+ self.config = config
+ super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name, zebra)
+
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(self.config)
+ f.writelines(self.config)
diff --git a/ryu/lib/docker/ryubgp.py b/ryu/lib/docker/ryubgp.py
new file mode 100644
index 0000000..8fe16f4
--- /dev/null
+++ b/ryu/lib/docker/ryubgp.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+import time
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
+
+ WAIT_FOR_BOOT = 1
+ SHARED_VOLUME = '/etc/ryu'
+
+ super(RyuBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name)
+ self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
+ self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
+ self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
+ self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
+
+ c = base.CmdBuffer()
+ c << '[DEFAULT]'
+ c << 'verbose=True'
+ c << 'log_file=/etc/ryu/manager.log'
+ LOG.info("[%s's new config]" % self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'import os'
+ c << ''
+ c << 'BGP = {'
+ c << " 'local_as': %s," % str(self.asn)
+ c << " 'router_id': '%s'," % self.router_id
+ c << " 'neighbors': ["
+ c << " {"
+ n_addr = info['neigh_addr'].split('/')[0]
+ c << " 'address': '%s'," % n_addr
+ c << " 'remote_as': %s," % str(peer.asn)
+ c << " 'enable_ipv4': True,"
+ c << " 'enable_ipv6': True,"
+ c << " 'enable_vpnv4': True,"
+ c << " 'enable_vpnv6': True,"
+ c << ' },'
+ c << ' ],'
+ c << " 'routes': ["
+ c << " {"
+ c << " 'prefix': '%s'," % route['prefix']
+ c << " },"
+ c << " ],"
+ c << "}"
+ log_conf = """LOGGING = {
+
+ # We use python logging package for logging.
+ 'version': 1,
+ 'disable_existing_loggers': False,
+
+ 'formatters': {
+ 'verbose': {
+ 'format': '%(levelname)s %(asctime)s %(module)s ' +
+ '[%(process)d %(thread)d] %(message)s'
+ },
+ 'simple': {
+ 'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
+ '%(message)s'
+ },
+ 'stats': {
+ 'format': '%(message)s'
+ },
+ },
+
+ 'handlers': {
+ # Outputs log to console.
+ 'console': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'simple'
+ },
+ 'console_stats': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'stats'
+ },
+ # Rotates log file when its size reaches 10MB.
+ 'log_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'bgpspeaker.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'verbose'
+ },
+ 'stats_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'statistics_bgps.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'stats'
+ },
+ },
+
+ # Fine-grained control of logging per instance.
+ 'loggers': {
+ 'bgpspeaker': {
+ 'handlers': ['console', 'log_file'],
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'stats': {
+ 'handlers': ['stats_file', 'console_stats'],
+ 'level': 'INFO',
+ 'propagate': False,
+ 'formatter': 'stats',
+ },
+ },
+
+ # Root loggers.
+ 'root': {
+ 'handlers': ['console', 'log_file'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ },
+}"""
+ c << log_conf
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ self._create_config_ryu()
+ self._create_config_ryu_bgp()
+
+ results = self.exec_on_ctn('ps ax')
+ running = False
+ running = True
+ return running
+
+ return True
+ result = False
+ try_times = 3
+ try_times = 1
+ cmd = "ryu-manager --verbose "
+ cmd += "--config-file %s " % self.SHARED_RYU_CONF
+ cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
+ cmd += "ryu.services.protocols.bgp.application"
+ self.exec_on_ctn(cmd, detach=True)
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ return True
+ result = False
+ try_times = 3
+ try_times = 1
+ cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
+ self.exec_on_ctn(cmd)
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ w_time = super(RyuBGPContainer,
+ self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
+ return w_time
+
+ self.stop_ryubgp(retry=True)
+ self.start_ryubgp(retry=True)
diff --git a/ryu/tests/integrated/common/__init__.py b/ryu/tests/integrated/common/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/ryu/tests/integrated/common/docker_base.py b/ryu/tests/integrated/common/docker_base.py
deleted file mode 100644
index 1ae2cc2..0000000
--- a/ryu/tests/integrated/common/docker_base.py
+++ /dev/null
@@ -1,801 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/base.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import itertools
-import logging
-import os
-import subprocess
-import time
-
-import netaddr
-import six
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_TEST_PREFIX = ''
-DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
-TEST_PREFIX = DEFAULT_TEST_PREFIX
-TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
-
-BGP_FSM_IDLE = 'BGP_FSM_IDLE'
-BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
-BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
-
-BGP_ATTR_TYPE_ORIGIN = 1
-BGP_ATTR_TYPE_AS_PATH = 2
-BGP_ATTR_TYPE_NEXT_HOP = 3
-BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
-BGP_ATTR_TYPE_LOCAL_PREF = 5
-BGP_ATTR_TYPE_COMMUNITIES = 8
-BGP_ATTR_TYPE_ORIGINATOR_ID = 9
-BGP_ATTR_TYPE_CLUSTER_LIST = 10
-BGP_ATTR_TYPE_MP_REACH_NLRI = 14
-BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
-
-BRIDGE_TYPE_DOCKER = 'docker'
-BRIDGE_TYPE_BRCTL = 'brctl'
-BRIDGE_TYPE_OVS = 'ovs'
-
-
- super(CommandError, self).__init__()
- self.out = out
-
-
- e = RuntimeError()
- r = f()
- time.sleep(s)
- return r
- raise e
-
-
- super(CmdBuffer, self).__init__()
- self.delim = delim
-
- self.append(value)
-
- return self.delim.join(self)
-
-
-
- stdout = stdout or ''
- obj = super(CommandOut, cls).__new__(cls, stdout, **kwargs)
- obj.stderr = stderr or ''
- obj.command = command
- obj.returncode = returncode
- return obj
-
-
-
- """Execute a command using subprocess.Popen()
- - out: stdout from subprocess.Popen()
- out has some attributes.
- out.returncode: returncode of subprocess.Popen()
- out.stderr: stderr from subprocess.Popen()
- """
- p_stdout = subprocess.PIPE
- p_stderr = subprocess.PIPE
- p_stdout = None
- p_stderr = None
- pop = subprocess.Popen(cmd, shell=True, executable=executable,
- stdout=p_stdout,
- stderr=p_stderr)
- __stdout, __stderr = pop.communicate()
- _stdout = six.text_type(__stdout, 'utf-8')
- _stderr = six.text_type(__stderr, 'utf-8')
- out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
- return out
-
- out = None
- out = self._execute(cmd, capture=capture)
- LOG.info(out.command)
- return out
- LOG.error("stdout: %s", out)
- LOG.error("stderr: %s", out.stderr)
- break
- time.sleep(interval)
- raise CommandError(out)
-
- cmd = 'sudo %s' % cmd
- return self.execute(cmd, capture=capture,
- try_times=try_times, interval=interval)
-
-
- self.baseimage = baseimage
- self.cmd = Command()
-
- out = self.cmd.sudo('sudo docker images')
- images = []
- images.append(line.split()[0])
- return images
-
- return name in self.get_images()
-
- self.cmd.sudo(
- "docker build -t {0} {1}".format(tagname, dockerfile_dir),
- try_times=3)
-
- return tagname
- self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
-
- def create_quagga(self, tagname='quagga', image=None,
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- pkges = ' '.join([
- 'telnet',
- 'tcpdump',
- 'quagga',
- ])
- use_image = image
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'RUN apt-get update'
- c << 'RUN apt-get install -qy --no-install-recommends %s' % pkges
- c << 'CMD /usr/lib/quagga/bgpd'
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
- self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
- self.build(tagname, workdir)
- return tagname
-
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- workdir_ctn = '/root/osrg/ryu'
- pkges = ' '.join([
- 'tcpdump',
- 'iproute2',
- ])
- use_image = image
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'ADD ryu %s' % workdir_ctn
- install = ' '.join([
- 'RUN apt-get update',
- '&& apt-get install -qy --no-install-recommends %s' % pkges,
- '&& cd %s' % workdir_ctn,
- # Note: Clean previous builds, because "python setup.py install"
- # might fail if the current directory contains the symlink to
- # Docker host file systems.
- '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
- '&& pip install -r tools/pip-requires -r
tools/optional-requires',
- '&& python setup.py install',
- ])
- c << install
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
- self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
- self.cmd.execute('cp -r ../ryu %s/' % workdir)
- self.build(tagname, workdir)
- return tagname
-
-
- def __init__(self, name, subnet='', start_ip=None, end_ip=None,
- with_ip=True, self_ip=False,
- fixed_ip=None, reuse=False,
- """Manage a bridge
- - name: bridge name
- - subnet: network cider to be used in this bridge
- - start_ip: start address of an ip to be used in the subnet
- - end_ip: end address of an ip to be used in the subnet
- - with_ip: specify if assign automatically an ip address
- - self_ip: specify if assign an ip address for the bridge
- - fixed_ip: an ip address to be assigned to the bridge
- - reuse: specify if use an existing bridge
- - br_type: One either in a 'docker', 'brctl' or 'ovs'
- """
- self.cmd = Command()
- self.name = name
- if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
- raise Exception("argument error br_type: %s" % br_type)
- self.br_type = br_type
- self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
- self.name = '{0}_{1}'.format(TEST_PREFIX, name)
- self.with_ip = with_ip
- self.subnet = netaddr.IPNetwork(subnet)
- self.start_ip = start_ip
- self.start_ip = netaddr.IPAddress(self.subnet.first)
- self.end_ip = end_ip
- self.end_ip = netaddr.IPAddress(self.subnet.last)
-
- yield host
- self._ip_generator = _ip_gen()
- # throw away first network address
- self.next_ip_address()
-
- self.self_ip = self_ip
- self.ip_addr = fixed_ip
- self.ip_addr = self.next_ip_address()
- gw = "--gateway %s" % self.ip_addr.split('/')[0]
- v6 = ''
- v6 = '--ipv6'
- cmd = ("docker network create --driver bridge %s "
- "%s --subnet %s %s" % (v6, gw, subnet, self.name))
- cmd = "ip link add {0} type bridge".format(self.name)
- cmd = "ovs-vsctl add-br {0}".format(self.name)
- raise ValueError('Unsupported br_type: %s' % self.br_type)
- self.delete()
- self.execute(cmd, sudo=True, retry=True)
- try_several_times(f)
- self.execute("ip link set up dev {0}".format(self.name),
- sudo=True, retry=True)
-
- ips = self.check_br_addr(self.name)
- self.execute(
- "ip addr del {0} dev {1}".format(ip, self.name),
- sudo=True, retry=True)
- self.execute(
- "ip addr add {0} dev {1}".format(self.ip_addr, self.name
),
- sudo=True, retry=True)
- self.ctns = []
-
- out = self.execute('docker network ls', sudo=True, retry=True)
- bridges = []
- bridges.append(line.split()[1])
- return bridges
-
- out = self.execute('brctl show', retry=True)
- bridges = []
- bridges.append(line.split()[0])
- return bridges
-
- out = self.execute('ovs-vsctl list-br', sudo=True, retry=True)
- return out.splitlines()
-
- return self.get_bridges_dc()
- return self.get_bridges_brctl()
- return self.get_bridges_ovs()
-
- return self.name in self.get_bridges()
-
- m = self.cmd.sudo
- m = self.cmd.execute
- return m(cmd, capture=capture, try_times=3, interval=1)
- return m(cmd, capture=capture)
-
- ips = {}
- cmd = "ip a show dev %s" % br
- elems = [e.strip() for e in line.strip().split(' ')]
- ips[4] = elems[1]
- elems = [e.strip() for e in line.strip().split(' ')]
- ips[6] = elems[1]
- return ips
-
- return "{0}/{1}".format(next(self._ip_generator),
- self.subnet.prefixlen)
-
- name = ctn.next_if_name()
- self.ctns.append(ctn)
- ip_address = None
- ipv4 = None
- ipv6 = None
- ip_address = self.next_ip_address()
- ip_address_ip = ip_address.split('/')[0]
- version = 4
- version = 6
- opt_ip = "--ip %s" % ip_address_ip
- ipv4 = ip_address
- opt_ip = "--ip6 %s" % ip_address_ip
- ipv6 = ip_address
- cmd = "docker network connect %s %s %s" % (
- opt_ip, self.name, ctn.docker_name())
- self.execute(cmd, sudo=True)
- ctn.set_addr_info(bridge=self.name, ipv4=ipv4, ipv6=ipv6,
- ifname=name)
- ip_address = self.next_ip_address()
- version = 4
- version = 6
- ctn.pipework(self, ip_address, name, version=version)
- ctn.pipework(self, '0/0', name)
- return ip_address
-
- return
- self.execute("docker network rm %s" % self.name,
- sudo=True, retry=True)
- self.execute("ip link set down dev %s" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ip link delete %s type bridge" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ovs-vsctl del-br %s" % self.name,
- sudo=True, retry=True)
-
-
- self.name = name
- self.image = image
- self.shared_volumes = []
- self.ip_addrs = []
- self.ip6_addrs = []
- self.is_running = False
- self.eths = []
- self.id = None
-
- self.cmd = Command()
- self.remove()
-
- return self.name
- return '{0}_{1}'.format(TEST_PREFIX, self.name)
-
- return self.id
- return self.docker_name()
-
- name = 'eth{0}'.format(len(self.eths) + 1)
- self.eths.append(name)
- return name
-
- self.ip_addrs.append((ifname, ipv4, bridge))
- self.ip6_addrs.append((ifname, ipv6, bridge))
-
- addrinfo = {}
- ip_addrs = self.ip_addrs
- ip_addrs = self.ip6_addrs
- return None
- addrinfo[addr[1]] = addr[0]
- return addrinfo
-
- m = self.cmd.sudo
- m = self.cmd.execute
- return m(cmd, capture=capture, try_times=3, interval=1)
- return m(cmd, capture=capture)
-
- return self.cmd.sudo(cmd, capture=capture, try_times=3, interval=1)
- return self.cmd.sudo(cmd, capture=capture)
-
- name = self.docker_name()
- flag = '-d' if detach else ''
- return self.dcexec('docker exec {0} {1} {2}'.format(
- flag, name, cmd), capture=capture)
-
- cmd = 'docker ps --no-trunc=true'
- cmd += ' --all=true'
- out = self.dcexec(cmd, retry=True)
- containers = []
- containers.append(line.split()[-1])
- return containers
-
- return self.docker_name() in self.get_containers(allctn=allctn)
-
- c = CmdBuffer(' ')
- c << "docker run --privileged=true"
- c << "-v {0}:{1}".format(sv[0], sv[1])
- c << "--name {0} --hostname {0} -id {1}".format(self.docker_name()
,
- self.image)
- self.id = self.dcexec(str(c), retry=True)
- self.is_running = True
- self.exec_on_ctn("ip li set up dev lo")
- ipv4 = None
- ipv6 = None
- elems = [e.strip() for e in line.strip().split(' ')]
- ipv4 = elems[1]
- elems = [e.strip() for e in line.strip().split(' ')]
- ipv6 = elems[1]
- self.set_addr_info(bridge='docker0', ipv4=ipv4, ipv6=ipv6,
- ifname='eth0')
- return 0
-
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- LOG.warning('Call run() before pipeworking')
- return
- c = CmdBuffer(' ')
- c << "pipework {0}".format(bridge.name)
-
- c << "-i {0}".format(intf_name)
- intf_name = "eth1"
- ipv4 = None
- ipv6 = None
- ipv4 = ip_addr
- c << '-a 6'
- ipv6 = ip_addr
- c << "{0} {1}".format(self.docker_name(), ip_addr)
- self.set_addr_info(bridge=bridge.name, ipv4=ipv4, ipv6=ipv6,
- ifname=intf_name)
- self.execute(str(c), sudo=True, retry=True)
-
- cmd = "docker inspect -f '{{.State.Pid}}' %s" %
self.docker_name()
- return int(self.dcexec(cmd))
- return -1
-
- interface = "eth0"
- filename = "{0}/{1}.dump".format(
- self.shared_volumes[0][1], interface)
- self.exec_on_ctn(
- "tcpdump -i {0} -w {1}".format(interface, filename),
- detach=True)
-
-
-
- WAIT_FOR_BOOT = 1
- RETRY_INTERVAL = 5
- DEFAULT_PEER_ARG = {'neigh_addr': '',
- 'passwd': None,
- 'vpn': False,
- 'flowspec': False,
- 'is_rs_client': False,
- 'is_rr_client': False,
- 'cluster_id': None,
- 'policies': None,
- 'passive': False,
- 'local_addr': '',
- 'as2': False,
- 'graceful_restart': None,
- 'local_as': None,
- 'prefix_limit': None}
- default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
- DEFAULT_ROUTE_ARG = {'prefix': None,
- 'rf': 'ipv4',
- 'attr': None,
- 'next-hop': None,
- 'as-path': None,
- 'community': None,
- 'med': None,
- 'local-pref': None,
- 'extended-community': None,
- 'matchs': None,
- 'thens': None}
- default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
-
- self.config_dir = TEST_BASE_DIR
- self.config_dir = os.path.join(self.config_dir, TEST_PREFIX)
- self.config_dir = os.path.join(self.config_dir, name)
- self.asn = asn
- self.router_id = router_id
- self.peers = {}
- self.routes = {}
- self.policies = {}
- super(BGPContainer, self).__init__(name, ctn_image_name)
- self.execute(
- 'rm -rf {0}'.format(self.config_dir), sudo=True)
- self.execute('mkdir -p {0}'.format(self.config_dir))
- self.execute('chmod 777 {0}'.format(self.config_dir))
-
- return str({'name': self.name, 'asn': self.asn,
- 'router_id': self.router_id})
-
- self.create_config()
- super(BGPContainer, self).run()
- time.sleep(w_time)
- return w_time
-
- def add_peer(self, peer, bridge='', reload_config=True, v6=False,
- peer_info = peer_info or {}
- self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
- self.peers[peer].update(peer_info)
- peer_keys = sorted(self.peers[peer].keys())
- raise Exception("argument error peer_info: %s" % peer_info)
-
- neigh_addr = ''
- local_addr = ''
- it = itertools.product(self.ip_addrs, peer.ip_addrs)
- it = itertools.product(self.ip6_addrs, peer.ip6_addrs)
-
- continue
- neigh_addr = you[1]
- local_addr = me[1]
- addr, mask = local_addr.split('/')
- local_addr = "{0}%{1}/{2}".format(addr, me[0], mask)
- break
-
- raise Exception('peer {0} seems not ip reachable'.format(peer))
-
- self.peers[peer]['policies'] = {}
-
- self.peers[peer]['neigh_addr'] = neigh_addr
- self.peers[peer]['local_addr'] = local_addr
- self.create_config()
- self.reload_config()
-
- del self.peers[peer]
- self.create_config()
- self.reload_config()
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- return self.execute('cat {0}/*.log'.format(self.config_dir))
-
- route_info = route_info or {}
- self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
- self.routes[route].update(route_info)
- route_keys = sorted(self.routes[route].keys())
- raise Exception("argument error route_info: %s" % route_info)
- self.routes[route]['prefix'] = route
- self.create_config()
- self.reload_config()
-
- def add_policy(self, policy, peer, typ, default='accept',
- self.set_default_policy(peer, typ, default)
- self.define_policy(policy)
- self.assign_policy(peer, policy, typ)
- self.create_config()
- self.reload_config()
-
- if (typ in ['in', 'out', 'import', 'export'] and
- self.peers[peer]['default-policy'] = {}
- self.peers[peer]['default-policy'][typ] = default
- raise Exception('wrong type or default')
-
- self.policies[policy['name']] = policy
-
- raise Exception('peer {0} not found'.format(peer.name))
- name = policy['name']
- raise Exception('policy {0} not found'.format(name))
- self.peers[peer]['policies'][typ] = policy
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- version = netaddr.IPNetwork(prefix).version
- addr = prefix.split('/')[0]
- ping_cmd = 'ping'
- ping_cmd = 'ping6'
- raise Exception(
- 'unsupported route family: {0}'.format(version))
- cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format(
- ping_cmd, addr)
- interval = 1
- count = 0
- res = self.exec_on_ctn(cmd)
- LOG.info(res)
- break
- time.sleep(interval)
- count += interval
- raise Exception('timeout')
- return True
-
- interval = 1
- count = 0
- state = self.get_neighbor_state(peer)
- LOG.info("%s's peer %s state: %s",
- self.router_id, peer.router_id, state)
- return
-
- time.sleep(interval)
- count += interval
- raise Exception('timeout')
-
- cmd = '/sbin/ip route add {0} via {1}'.format(network, next_hop)
- self.exec_on_ctn(cmd)
-
- cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
- self.exec_on_ctn(cmd)
-
- raise NotImplementedError()
-
- raise NotImplementedError()
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg.sh b/ryu/tests/integrated/common/install_docker_test_pkg.sh
deleted file mode 100644
index a771dfc..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-function add_docker_aptline {
- sudo apt-get update
- if ! apt-cache search docker-engine | grep docker-engine; then
- VER=`lsb_release -r`
- if echo $VER | grep 12.04; then
- REL_NAME=precise
- elif echo $VER | grep 14.04; then
- REL_NAME=trusty
- elif echo $VER | grep 15.10; then
- REL_NAME=wily
- elif echo $VER | grep 16.04; then
- REL_NAME=xenial
- else
- retrun 1
- fi
- RELEASE=ubuntu-$REL_NAME
- sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
- sudo sh -c "echo deb https://apt.dockerproject.org/repo $RELEASE main > /etc/apt/sources.list.d/docker.list"
- fi
-}
-
-init_variables
-
-if [ $APTLINE_DOCKER -eq 1 ]; then
- add_docker_aptline
-fi
-
-sudo apt-get update
-if apt-cache search docker-engine | grep docker-engine; then
- DOCKER_PKG=docker-engine
-else
- DOCKER_PKG=docker.io
-fi
-sudo apt-get install -y $DOCKER_PKG
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh b/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
deleted file mode 100644
index 44a3e10..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-set -ex
-
-function init_variables {
- APTLINE_DOCKER=0
- DIR_BASE=/tmp
-}
-
-function process_options {
- local max
- local i
- max=$#
- i=1
- while [ $i -le $max ]; do
- case "$1" in
- -a|--add-docker-aptline)
- APTLINE_DOCKER=1
- ;;
- -d|--download-dir)
- shift; ((i++))
- DIR_BASE=$1
- ;;
- esac
- shift; ((i++))
- done
-}
-
-function install_pipework {
- if ! which /usr/local/bin/pipework >/dev/null
- then
- sudo rm -rf $DIR_BASE/pipework
- git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework
- sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework
- fi
-}
-
-function install_depends_pkg {
- install_pipework
-}
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh b/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
deleted file mode 100644
index d8c3b49..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-init_variables
-
-sudo apt-get update
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/quagga.py b/ryu/tests/integrated/common/quagga.py
deleted file mode 100644
index 9b6d218..0000000
--- a/ryu/tests/integrated/common/quagga.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/quagga.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-
-import netaddr
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/quagga'
-
- super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
- self.zebra = zebra
- self._create_config_debian()
-
- w_time = super(QuaggaBGPContainer,
- self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- rib = []
- return self.get_global_rib_with_prefix(prefix, rf)
-
- out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
- return rib
-
- read_next = False
-
- ibgp = False
- line = line[2:]
- line = line[1:]
- ibgp = True
- continue
-
- elems = line.split()
-
- read_next = True
- prefix = elems[0]
- continue
- nexthop = elems[0]
- prefix = elems[0]
- nexthop = elems[1]
- read_next = False
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'ibgp': ibgp})
-
- return rib
-
- rib = []
-
- lines = [line.strip() for line in self.vtysh(
- 'show bgp {0} unicast {1}'.format(rf, prefix),
- config=False).split('\n')]
-
- return rib
-
- lines = lines[2:]
-
- lines.pop(0) # another useless line
- lines = lines[2:] # other useless lines
- raise Exception('unknown output format {0}'.format(lines))
-
- aspath = []
- aspath = [int(asn) for asn in lines[0].split()]
-
- nexthop = lines[1].split()[0].strip()
- info = [s.strip(',') for s in lines[2].split()]
- attrs = []
- med = info[info.index('metric') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
- 'metric': int(med)})
- localpref = info[info.index('localpref') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
- 'value': int(localpref)})
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'aspath': aspath, 'attrs': attrs})
-
- return rib
-
- raise Exception('not found peer {0}'.format(peer.router_id))
-
- neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
-
- info = [l.strip() for l in self.vtysh(
- 'show bgp neighbors {0}'.format(neigh_addr),
- config=False).split('\n')]
-
- raise Exception('unknown format')
-
- idx1 = info[0].index('BGP neighbor is ')
- idx2 = info[0].index(',')
- n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
- idx1 = info[2].index('= ')
- state = info[2][idx1 + len('= '):]
- return base.BGP_FSM_IDLE
- return base.BGP_FSM_ACTIVE
- return base.BGP_FSM_ESTABLISHED
- return state
-
- raise Exception('not found peer {0}'.format(peer.router_id))
-
- self.vtysh('clear ip bgp * soft', config=False)
-
- zebra = 'no'
- self._create_config_bgp()
- zebra = 'yes'
- self._create_config_zebra()
- self._create_config_daemons(zebra)
-
- c = base.CmdBuffer()
- c << 'vtysh_enable=yes'
- c << 'zebra_options=" --daemon -A 127.0.0.1"'
- c << 'bgpd_options=" --daemon -A 127.0.0.1"'
- c << 'ospfd_options=" --daemon -A 127.0.0.1"'
- c << 'ospf6d_options=" --daemon -A ::1"'
- c << 'ripd_options=" --daemon -A 127.0.0.1"'
- c << 'ripngd_options=" --daemon -A ::1"'
- c << 'isisd_options=" --daemon -A 127.0.0.1"'
- c << 'babeld_options=" --daemon -A 127.0.0.1"'
- c << 'watchquagga_enable=yes'
- c << 'watchquagga_options=(--daemon)'
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'zebra=%s' % zebra
- c << 'bgpd=yes'
- c << 'ospfd=no'
- c << 'ospf6d=no'
- c << 'ripd=no'
- c << 'ripngd=no'
- c << 'isisd=no'
- c << 'babeld=no'
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
-
- c = base.CmdBuffer()
- c << 'hostname bgpd'
- c << 'password zebra'
- c << 'router bgp {0}'.format(self.asn)
- c << 'bgp router-id {0}'.format(self.router_id)
- c << 'bgp graceful-restart'
-
- version = 4
- version = netaddr.IPNetwork(info['neigh_addr']).version
- n_addr = info['neigh_addr'].split('/')[0]
- c << 'no bgp default ipv4-unicast'
-
- c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
- c << 'neighbor {0} route-server-client'.format(n_addr)
- c << 'neighbor {0} route-map {1} {2}'.format(n_addr, p['name'],
- typ)
- c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
- c << 'neighbor {0} passive'.format(n_addr)
- c << 'address-family ipv6 unicast'
- c << 'neighbor {0} activate'.format(n_addr)
- c << 'exit-address-family'
-
- c << 'network {0}'.format(route['prefix'])
- c << 'address-family ipv6 unicast'
- c << 'network {0}'.format(route['prefix'])
- c << 'exit-address-family'
- raise Exception(
- 'unsupported route faily: {0}'.format(route['rf']))
-
- c << 'address-family ipv6 unicast'
- c << 'redistribute connected'
- c << 'exit-address-family'
- c << 'redistribute connected'
-
- c << 'access-list {0} {1} {2}'.format(name, policy['type'],
- policy['match'])
- c << 'route-map {0} permit 10'.format(name)
- c << 'match ip address {0}'.format(name)
- c << 'set metric {0}'.format(policy['med'])
-
- c << 'debug bgp as4'
- c << 'debug bgp fsm'
- c << 'debug bgp updates'
- c << 'debug bgp events'
- c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
-
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'hostname zebra'
- c << 'password zebra'
- c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
- c << 'debug zebra packet'
- c << 'debug zebra kernel'
- c << 'debug zebra rib'
- c << ''
-
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- cmd = [cmd]
- cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
- return self.exec_on_ctn(
- "vtysh -d bgpd -c 'en' -c 'conf t' -c "
- "'router bgp {0}' {1}".format(self.asn, cmd),
- capture=True)
- return self.exec_on_ctn("vtysh -d bgpd {0}".format(cmd),
- capture=True)
-
- daemon = []
- daemon.append('bgpd')
- daemon.append('zebra')
- cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
- self.exec_on_ctn(cmd, capture=True)
-
-
- def __init__(self, name, config, ctn_image_name,
- asn = None
- router_id = None
- line = line.strip()
- asn = int(line[len('router bgp'):].strip())
- router_id = line[len('bgp router-id'):].strip()
- raise Exception('asn not in quagga config')
- raise Exception('router-id not in quagga config')
- self.config = config
- super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name, zebra)
-
- LOG.info("[%s's new config]", self.name)
- LOG.info(self.config)
- f.writelines(self.config)
diff --git a/ryu/tests/integrated/common/ryubgp.py b/ryu/tests/integrated/common/ryubgp.py
deleted file mode 100644
index 8fe16f4..0000000
--- a/ryu/tests/integrated/common/ryubgp.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-import time
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/ryu'
-
- super(RyuBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
- self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
- self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
- self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
-
- c = base.CmdBuffer()
- c << '[DEFAULT]'
- c << 'verbose=True'
- c << 'log_file=/etc/ryu/manager.log'
- LOG.info("[%s's new config]" % self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'import os'
- c << ''
- c << 'BGP = {'
- c << " 'local_as': %s," % str(self.asn)
- c << " 'router_id': '%s'," % self.router_id
- c << " 'neighbors': ["
- c << " {"
- n_addr = info['neigh_addr'].split('/')[0]
- c << " 'address': '%s'," % n_addr
- c << " 'remote_as': %s," % str(peer.asn)
- c << " 'enable_ipv4': True,"
- c << " 'enable_ipv6': True,"
- c << " 'enable_vpnv4': True,"
- c << " 'enable_vpnv6': True,"
- c << ' },'
- c << ' ],'
- c << " 'routes': ["
- c << " {"
- c << " 'prefix': '%s'," % route['prefix']
- c << " },"
- c << " ],"
- c << "}"
- log_conf = """LOGGING = {
-
- # We use python logging package for logging.
- 'version': 1,
- 'disable_existing_loggers': False,
-
- 'formatters': {
- 'verbose': {
- 'format': '%(levelname)s %(asctime)s %(module)s ' +
- '[%(process)d %(thread)d] %(message)s'
- },
- 'simple': {
- 'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
- '%(message)s'
- },
- 'stats': {
- 'format': '%(message)s'
- },
- },
-
- 'handlers': {
- # Outputs log to console.
- 'console': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'simple'
- },
- 'console_stats': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'stats'
- },
- # Rotates log file when its size reaches 10MB.
- 'log_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'bgpspeaker.log'),
- 'maxBytes': '10000000',
- 'formatter': 'verbose'
- },
- 'stats_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'statistics_bgps.log'),
- 'maxBytes': '10000000',
- 'formatter': 'stats'
- },
- },
-
- # Fine-grained control of logging per instance.
- 'loggers': {
- 'bgpspeaker': {
- 'handlers': ['console', 'log_file'],
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'stats': {
- 'handlers': ['stats_file', 'console_stats'],
- 'level': 'INFO',
- 'propagate': False,
- 'formatter': 'stats',
- },
- },
-
- # Root loggers.
- 'root': {
- 'handlers': ['console', 'log_file'],
- 'level': 'DEBUG',
- 'propagate': True,
- },
-}"""
- c << log_conf
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- self._create_config_ryu()
- self._create_config_ryu_bgp()
-
- results = self.exec_on_ctn('ps ax')
- running = False
- running = True
- return running
-
- return True
- result = False
- try_times = 3
- try_times = 1
- cmd = "ryu-manager --verbose "
- cmd += "--config-file %s " % self.SHARED_RYU_CONF
- cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
- cmd += "ryu.services.protocols.bgp.application"
- self.exec_on_ctn(cmd, detach=True)
- result = True
- break
- time.sleep(1)
- return result
-
- return True
- result = False
- try_times = 3
- try_times = 1
- cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
- self.exec_on_ctn(cmd)
- result = True
- break
- time.sleep(1)
- return result
-
- w_time = super(RyuBGPContainer,
- self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- self.stop_ryubgp(retry=True)
- self.start_ryubgp(retry=True)
diff --git a/tests/integrated/bgp/base.py b/tests/integrated/bgp/base.py
index 26fa396..2f210de 100644
--- a/tests/integrated/bgp/base.py
+++ b/tests/integrated/bgp/base.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest
-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga
LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/base_ip6.py b/tests/integrated/bgp/base_ip6.py
index be26faf..d867920 100644
--- a/tests/integrated/bgp/base_ip6.py
+++ b/tests/integrated/bgp/base_ip6.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest
-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga
LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/test_basic.py b/tests/integrated/bgp/test_basic.py
index 5817d44..d1eda39 100644
--- a/tests/integrated/bgp/test_basic.py
+++ b/tests/integrated/bgp/test_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import time
-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base
from . import base
diff --git a/tests/integrated/bgp/test_ip6_basic.py b/tests/integrated/bgp/test_ip6_basic.py
index 40461a5..911a0b5 100644
--- a/tests/integrated/bgp/test_ip6_basic.py
+++ b/tests/integrated/bgp/test_ip6_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import time
-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base
from . import base_ip6 as base
--
2.7.4
--
fumihiko kakuma <***@valinux.co.jp>
Takashi YAMAMOTO
2017-07-13 00:15:25 UTC
Permalink
Post by fumihiko kakuma
Hi Yamamoto-San,
On Wed, 12 Jul 2017 23:43:25 +0900
Post by Takashi YAMAMOTO
this breaks openstack neutron_dynamic_routing, doesn't it?
Thanks for the advice. I am aware of that.
So as soon as the ryu version with this commit is released,
I am going to push a patch to fix the breakage in neutron-dynamic-routing.
is it difficult to avoid breaking it in the first place?
e.g. make it try-import the new and old locations.
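(A minimal sketch of that idea, assuming the fallback lives on the importing
side; the module names come from this patch, everything else is illustrative
and not part of it:

    # Hypothetical compatibility import: prefer the new ryu.lib.docker
    # location and fall back to the old ryu.tests.integrated.common one.
    try:
        from ryu.lib.docker import docker_base as ctn_base
        from ryu.lib.docker import quagga
        from ryu.lib.docker import ryubgp
    except ImportError:
        from ryu.tests.integrated.common import docker_base as ctn_base
        from ryu.tests.integrated.common import quagga
        from ryu.tests.integrated.common import ryubgp

With something along these lines, a consumer such as neutron-dynamic-routing
could keep working against ryu releases from both before and after the move.)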
Post by fumihiko kakuma
Thanks,
Kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
This patch requires the v2 patch of Iwase-San.
---
.travis.yml | 2 +-
ryu/lib/docker/__init__.py | 0
ryu/lib/docker/docker_base.py | 801
+++++++++++++++++++++
ryu/lib/docker/install_docker_test_pkg.sh | 43 ++
ryu/lib/docker/install_docker_test_pkg_common.sh | 39 +
.../docker/install_docker_test_pkg_for_travis.sh | 12 +
ryu/lib/docker/quagga.py | 332 +++++++++
ryu/lib/docker/ryubgp.py | 212 ++++++
ryu/tests/integrated/common/__init__.py | 0
ryu/tests/integrated/common/docker_base.py | 801
---------------------
.../integrated/common/install_docker_test_pkg.sh | 43 --
.../common/install_docker_test_pkg_common.sh | 39 -
.../common/install_docker_test_pkg_for_travis.sh | 12 -
ryu/tests/integrated/common/quagga.py | 332 ---------
ryu/tests/integrated/common/ryubgp.py | 212 ------
tests/integrated/bgp/base.py | 6 +-
tests/integrated/bgp/base_ip6.py | 6 +-
tests/integrated/bgp/test_basic.py | 2 +-
tests/integrated/bgp/test_ip6_basic.py | 2 +-
19 files changed, 1448 insertions(+), 1448 deletions(-)
create mode 100644 ryu/lib/docker/__init__.py
create mode 100644 ryu/lib/docker/docker_base.py
create mode 100644 ryu/lib/docker/install_docker_test_pkg.sh
create mode 100644 ryu/lib/docker/install_docker_test_pkg_common.sh
create mode 100644 ryu/lib/docker/install_docker_
test_pkg_for_travis.sh
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
create mode 100644 ryu/lib/docker/quagga.py
create mode 100644 ryu/lib/docker/ryubgp.py
delete mode 100644 ryu/tests/integrated/common/__init__.py
delete mode 100644 ryu/tests/integrated/common/docker_base.py
delete mode 100644 ryu/tests/integrated/common/
install_docker_test_pkg.sh
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
delete mode 100644 ryu/tests/integrated/common/
install_docker_test_pkg_
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
common.sh
delete mode 100644 ryu/tests/integrated/common/
install_docker_test_pkg_for_travis.sh
delete mode 100644 ryu/tests/integrated/common/quagga.py
delete mode 100644 ryu/tests/integrated/common/ryubgp.py
diff --git a/.travis.yml b/.travis.yml
index 9e5474a..cd35aac 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,7 +16,7 @@ sudo: required # Required to enable Docker service
- pip install tox coveralls
- - bash ryu/tests/integrated/common/install_docker_test_pkg_for_
travis.sh
+ - bash ryu/lib/docker/install_docker_test_pkg_for_travis.sh
- NOSE_VERBOSE=0 tox -e $TOX_ENV
diff --git a/ryu/lib/docker/__init__.py b/ryu/lib/docker/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ryu/lib/docker/docker_base.py
b/ryu/lib/docker/docker_base.py
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
new file mode 100644
index 0000000..1ae2cc2
--- /dev/null
+++ b/ryu/lib/docker/docker_base.py
@@ -0,0 +1,801 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/base.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import itertools
+import logging
+import os
+import subprocess
+import time
+
+import netaddr
+import six
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_TEST_PREFIX = ''
+DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
+TEST_PREFIX = DEFAULT_TEST_PREFIX
+TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
+
+BGP_FSM_IDLE = 'BGP_FSM_IDLE'
+BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
+BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
+
+BGP_ATTR_TYPE_ORIGIN = 1
+BGP_ATTR_TYPE_AS_PATH = 2
+BGP_ATTR_TYPE_NEXT_HOP = 3
+BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
+BGP_ATTR_TYPE_LOCAL_PREF = 5
+BGP_ATTR_TYPE_COMMUNITIES = 8
+BGP_ATTR_TYPE_ORIGINATOR_ID = 9
+BGP_ATTR_TYPE_CLUSTER_LIST = 10
+BGP_ATTR_TYPE_MP_REACH_NLRI = 14
+BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
+
+BRIDGE_TYPE_DOCKER = 'docker'
+BRIDGE_TYPE_BRCTL = 'brctl'
+BRIDGE_TYPE_OVS = 'ovs'
+
+
+ super(CommandError, self).__init__()
+ self.out = out
+
+
+ e = RuntimeError()
+ r = f()
+ time.sleep(s)
+ return r
+ raise e
+
+
+ super(CmdBuffer, self).__init__()
+ self.delim = delim
+
+ self.append(value)
+
+ return self.delim.join(self)
+
+
+
+ stdout = stdout or ''
+ obj = super(CommandOut, cls).__new__(cls, stdout, **kwargs)
+ obj.stderr = stderr or ''
+ obj.command = command
+ obj.returncode = returncode
+ return obj
+
+
+
+ """Execute a command using subprocess.Popen()
+ - out: stdout from subprocess.Popen()
+ out has some attributes.
+ out.returncode: returncode of subprocess.Popen()
+ out.stderr: stderr from subprocess.Popen()
+ """
+ p_stdout = subprocess.PIPE
+ p_stderr = subprocess.PIPE
+ p_stdout = None
+ p_stderr = None
+ pop = subprocess.Popen(cmd, shell=True, executable=executable,
+ stdout=p_stdout,
+ stderr=p_stderr)
+ __stdout, __stderr = pop.communicate()
+ _stdout = six.text_type(__stdout, 'utf-8')
+ _stderr = six.text_type(__stderr, 'utf-8')
+ out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
+ return out
+
+ out = None
+ out = self._execute(cmd, capture=capture)
+ LOG.info(out.command)
+ return out
+ LOG.error("stdout: %s", out)
+ LOG.error("stderr: %s", out.stderr)
+ break
+ time.sleep(interval)
+ raise CommandError(out)
+
+ cmd = 'sudo %s' % cmd
+ return self.execute(cmd, capture=capture,
+ try_times=try_times, interval=interval)
+
+
+ self.baseimage = baseimage
+ self.cmd = Command()
+
+ out = self.cmd.sudo('sudo docker images')
+ images = []
+ images.append(line.split()[0])
+ return images
+
+ return name in self.get_images()
+
+ self.cmd.sudo(
+ "docker build -t {0} {1}".format(tagname, dockerfile_dir),
+ try_times=3)
+
+ return tagname
+ self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
+
+ def create_quagga(self, tagname='quagga', image=None,
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ pkges = ' '.join([
+ 'telnet',
+ 'tcpdump',
+ 'quagga',
+ ])
+ use_image = image
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'RUN apt-get update'
+ c << 'RUN apt-get install -qy --no-install-recommends %s' %
pkges
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ c << 'CMD /usr/lib/quagga/bgpd'
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c),
workdir))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.build(tagname, workdir)
+ return tagname
+
+ def create_ryu(self, tagname='ryu', image=None,
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ workdir_ctn = '/root/osrg/ryu'
+ pkges = ' '.join([
+ 'tcpdump',
+ 'iproute2',
+ ])
+ use_image = image
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'ADD ryu %s' % workdir_ctn
+ install = ' '.join([
+ 'RUN apt-get update',
+ '&& apt-get install -qy --no-install-recommends %s' %
pkges,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ '&& cd %s' % workdir_ctn,
+ # Note: Clean previous builds, because "python setup.py install"
+ # might fail if the current directory contains the symlink to
+ # Docker host file systems.
+ '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
+ '&& pip install -r tools/pip-requires -r
tools/optional-requires',
+ '&& python setup.py install',
+ ])
+ c << install
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c),
workdir))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.cmd.execute('cp -r ../ryu %s/' % workdir)
+ self.build(tagname, workdir)
+ return tagname
+
+
+ def __init__(self, name, subnet='', start_ip=None, end_ip=None,
+ with_ip=True, self_ip=False,
+ fixed_ip=None, reuse=False,
+ """Manage a bridge
+ - name: bridge name
+ - subnet: network cider to be used in this bridge
+ - start_ip: start address of an ip to be used in the subnet
+ - end_ip: end address of an ip to be used in the subnet
+ - with_ip: specify if assign automatically an ip address
+ - self_ip: specify if assign an ip address for the bridge
+ - fixed_ip: an ip address to be assigned to the bridge
+ - reuse: specify if use an existing bridge
+ - br_type: One either in a 'docker', 'brctl' or 'ovs'
+ """
+ self.cmd = Command()
+ self.name = name
+ if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
+ raise Exception("argument error br_type: %s" % br_type)
+ self.br_type = br_type
+ self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
+ self.name = '{0}_{1}'.format(TEST_PREFIX, name)
+ self.with_ip = with_ip
+ self.subnet = netaddr.IPNetwork(subnet)
+ self.start_ip = start_ip
+ self.start_ip = netaddr.IPAddress(self.subnet.first)
+ self.end_ip = end_ip
+ self.end_ip = netaddr.IPAddress(self.subnet.last)
+
+ for host in netaddr.IPRange(self.start_ip,
+ yield host
+ self._ip_generator = _ip_gen()
+ # throw away first network address
+ self.next_ip_address()
+
+ self.self_ip = self_ip
+ self.ip_addr = fixed_ip
+ self.ip_addr = self.next_ip_address()
+ gw = "--gateway %s" % self.ip_addr.split('/')[0]
+ v6 = ''
+ v6 = '--ipv6'
+ cmd = ("docker network create --driver bridge %s "
+ "%s --subnet %s %s" % (v6, gw, subnet, self.name))
+ cmd = "ip link add {0} type bridge".format(
self.name)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ cmd = "ovs-vsctl add-br {0}".format(self.name)
+ raise ValueError('Unsupported br_type: %s' % self.br_type)
+ self.delete()
+ self.execute(cmd, sudo=True, retry=True)
+ try_several_times(f)
+ self.execute("ip link set up dev {0}".format(self.name),
+ sudo=True, retry=True)
+
+ ips = self.check_br_addr(self.name)
+ self.execute(
+ "ip addr del {0} dev {1}".format(ip,
self.name),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ sudo=True, retry=True)
+ self.execute(
+ "ip addr add {0} dev {1}".format(self.ip_addr,
self.name
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
),
+ sudo=True, retry=True)
+ self.ctns = []
+
+ out = self.execute('docker network ls', sudo=True, retry=True)
+ bridges = []
+ bridges.append(line.split()[1])
+ return bridges
+
+ out = self.execute('brctl show', retry=True)
+ bridges = []
+ bridges.append(line.split()[0])
+ return bridges
+
+ out = self.execute('ovs-vsctl list-br', sudo=True, retry=True)
+ return out.splitlines()
+
+ return self.get_bridges_dc()
+ return self.get_bridges_brctl()
+ return self.get_bridges_ovs()
+
+ return self.name in self.get_bridges()
+
+ m = self.cmd.sudo
+ m = self.cmd.execute
+ return m(cmd, capture=capture, try_times=3, interval=1)
+ return m(cmd, capture=capture)
+
+ ips = {}
+ cmd = "ip a show dev %s" % br
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ips[4] = elems[1]
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ips[6] = elems[1]
+ return ips
+
+ return "{0}/{1}".format(next(self._ip_generator),
+ self.subnet.prefixlen)
+
+ name = ctn.next_if_name()
+ self.ctns.append(ctn)
+ ip_address = None
+ ipv4 = None
+ ipv6 = None
+ ip_address = self.next_ip_address()
+ ip_address_ip = ip_address.split('/')[0]
+ version = 4
+ version = 6
+ opt_ip = "--ip %s" % ip_address_ip
+ ipv4 = ip_address
+ opt_ip = "--ip6 %s" % ip_address_ip
+ ipv6 = ip_address
+ cmd = "docker network connect %s %s %s" % (
+ opt_ip, self.name, ctn.docker_name())
+ self.execute(cmd, sudo=True)
+ ctn.set_addr_info(bridge=self.name, ipv4=ipv4, ipv6=ipv6,
+ ifname=name)
+ ip_address = self.next_ip_address()
+ version = 4
+ version = 6
+ ctn.pipework(self, ip_address, name, version=version)
+ ctn.pipework(self, '0/0', name)
+ return ip_address
+
+ return
+ self.execute("docker network rm %s" % self.name,
+ sudo=True, retry=True)
+ self.execute("ip link set down dev %s" % self.name,
+ sudo=True, retry=True)
+ self.execute(
+ "ip link delete %s type bridge" % self.name,
+ sudo=True, retry=True)
+ self.execute(
+ "ovs-vsctl del-br %s" % self.name,
+ sudo=True, retry=True)
+
+
+ self.name = name
+ self.image = image
+ self.shared_volumes = []
+ self.ip_addrs = []
+ self.ip6_addrs = []
+ self.is_running = False
+ self.eths = []
+ self.id = None
+
+ self.cmd = Command()
+ self.remove()
+
+ return self.name
+ return '{0}_{1}'.format(TEST_PREFIX, self.name)
+
+ return self.id
+ return self.docker_name()
+
+ name = 'eth{0}'.format(len(self.eths) + 1)
+ self.eths.append(name)
+ return name
+
+ def set_addr_info(self, bridge, ipv4=None, ipv6=None,
+ self.ip_addrs.append((ifname, ipv4, bridge))
+ self.ip6_addrs.append((ifname, ipv6, bridge))
+
+ addrinfo = {}
+ ip_addrs = self.ip_addrs
+ ip_addrs = self.ip6_addrs
+ return None
+ addrinfo[addr[1]] = addr[0]
+ return addrinfo
+
+ m = self.cmd.sudo
+ m = self.cmd.execute
+ return m(cmd, capture=capture, try_times=3, interval=1)
+ return m(cmd, capture=capture)
+
+ return self.cmd.sudo(cmd, capture=capture, try_times=3, interval=1)
+ return self.cmd.sudo(cmd, capture=capture)
+
+ name = self.docker_name()
+ flag = '-d' if detach else ''
+ return self.dcexec('docker exec {0} {1} {2}'.format(
+ flag, name, cmd), capture=capture)
+
+ cmd = 'docker ps --no-trunc=true'
+ cmd += ' --all=true'
+ out = self.dcexec(cmd, retry=True)
+ containers = []
+ containers.append(line.split()[-1])
+ return containers
+
+ return self.docker_name() in self.get_containers(allctn=allctn)
+
+ c = CmdBuffer(' ')
+ c << "docker run --privileged=true"
+ c << "-v {0}:{1}".format(sv[0], sv[1])
+ c << "--name {0} --hostname {0} -id
{1}".format(self.docker_name()
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
,
+ self.image)
+ self.id = self.dcexec(str(c), retry=True)
+ self.is_running = True
+ self.exec_on_ctn("ip li set up dev lo")
+ ipv4 = None
+ ipv6 = None
+ for line in self.exec_on_ctn("ip a show dev
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ipv4 = elems[1]
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ipv6 = elems[1]
+ self.set_addr_info(bridge='docker0', ipv4=ipv4, ipv6=ipv6,
+ ifname='eth0')
+ return 0
+
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True)
+ self.is_running = False
+ return out
+
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
+ self.is_running = False
+ return out
+
+ LOG.warning('Call run() before pipeworking')
+ return
+ c = CmdBuffer(' ')
+ c << "pipework {0}".format(bridge.name)
+
+ c << "-i {0}".format(intf_name)
+ intf_name = "eth1"
+ ipv4 = None
+ ipv6 = None
+ ipv4 = ip_addr
+ c << '-a 6'
+ ipv6 = ip_addr
+ c << "{0} {1}".format(self.docker_name(), ip_addr)
+ self.set_addr_info(bridge=bridge.name, ipv4=ipv4, ipv6=ipv6,
+ ifname=intf_name)
+ self.execute(str(c), sudo=True, retry=True)
+
+ cmd = "docker inspect -f '{{.State.Pid}}' %s" % self.docker_name()
+ return int(self.dcexec(cmd))
+ return -1
+
+ interface = "eth0"
+ filename = "{0}/{1}.dump".format(
+ self.shared_volumes[0][1], interface)
+ self.exec_on_ctn(
+ "tcpdump -i {0} -w {1}".format(interface, filename),
+ detach=True)
+
+
+
+ WAIT_FOR_BOOT = 1
+ RETRY_INTERVAL = 5
+ DEFAULT_PEER_ARG = {'neigh_addr': '',
+ 'passwd': None,
+ 'vpn': False,
+ 'flowspec': False,
+ 'is_rs_client': False,
+ 'is_rr_client': False,
+ 'cluster_id': None,
+ 'policies': None,
+ 'passive': False,
+ 'local_addr': '',
+ 'as2': False,
+ 'graceful_restart': None,
+ 'local_as': None,
+ 'prefix_limit': None}
+ default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
+ DEFAULT_ROUTE_ARG = {'prefix': None,
+ 'rf': 'ipv4',
+ 'attr': None,
+ 'next-hop': None,
+ 'as-path': None,
+ 'community': None,
+ 'med': None,
+ 'local-pref': None,
+ 'extended-community': None,
+ 'matchs': None,
+ 'thens': None}
+ default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
+
+ self.config_dir = TEST_BASE_DIR
+ self.config_dir = os.path.join(self.config_dir, TEST_PREFIX)
+ self.config_dir = os.path.join(self.config_dir, name)
+ self.asn = asn
+ self.router_id = router_id
+ self.peers = {}
+ self.routes = {}
+ self.policies = {}
+ super(BGPContainer, self).__init__(name, ctn_image_name)
+ self.execute(
+ 'rm -rf {0}'.format(self.config_dir), sudo=True)
+ self.execute('mkdir -p {0}'.format(self.config_dir))
+ self.execute('chmod 777 {0}'.format(self.config_dir))
+
+ return str({'name': self.name, 'asn': self.asn,
+ 'router_id': self.router_id})
+
+ self.create_config()
+ super(BGPContainer, self).run()
+ time.sleep(w_time)
+ return w_time
+
+ def add_peer(self, peer, bridge='', reload_config=True, v6=False,
+ peer_info = peer_info or {}
+ self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
+ self.peers[peer].update(peer_info)
+ peer_keys = sorted(self.peers[peer].keys())
+ raise Exception("argument error peer_info: %s" %
peer_info)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ neigh_addr = ''
+ local_addr = ''
+ it = itertools.product(self.ip_addrs, peer.ip_addrs)
+ it = itertools.product(self.ip6_addrs, peer.ip6_addrs)
+
+ continue
+ neigh_addr = you[1]
+ local_addr = me[1]
+ addr, mask = local_addr.split('/')
+ local_addr = "{0}%{1}/{2}".format(addr, me[0], mask)
+ break
+
+ raise Exception('peer {0} seems not ip reachable'.format(peer))
+
+ self.peers[peer]['policies'] = {}
+
+ self.peers[peer]['neigh_addr'] = neigh_addr
+ self.peers[peer]['local_addr'] = local_addr
+ self.create_config()
+ self.reload_config()
+
+ del self.peers[peer]
+ self.create_config()
+ self.reload_config()
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ return self.execute('cat {0}/*.log'.format(self.config_dir))
+
+ route_info = route_info or {}
+ self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
+ self.routes[route].update(route_info)
+ route_keys = sorted(self.routes[route].keys())
+ raise Exception("argument error route_info: %s" %
route_info)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.routes[route]['prefix'] = route
+ self.create_config()
+ self.reload_config()
+
+ def add_policy(self, policy, peer, typ, default='accept',
+ self.set_default_policy(peer, typ, default)
+ self.define_policy(policy)
+ self.assign_policy(peer, policy, typ)
+ self.create_config()
+ self.reload_config()
+
+ if (typ in ['in', 'out', 'import', 'export'] and
+ self.peers[peer]['default-policy'] = {}
+ self.peers[peer]['default-policy'][typ] = default
+ raise Exception('wrong type or default')
+
+ self.policies[policy['name']] = policy
+
+ raise Exception('peer {0} not found'.format(peer.name))
+ name = policy['name']
+ raise Exception('policy {0} not found'.format(name))
+ self.peers[peer]['policies'][typ] = policy
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ version = netaddr.IPNetwork(prefix).version
+ addr = prefix.split('/')[0]
+ ping_cmd = 'ping'
+ ping_cmd = 'ping6'
+ raise Exception(
+ 'unsupported route family: {0}'.format(version))
+ cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format(
+ ping_cmd, addr)
+ interval = 1
+ count = 0
+ res = self.exec_on_ctn(cmd)
+ LOG.info(res)
+ break
+ time.sleep(interval)
+ count += interval
+ raise Exception('timeout')
+ return True
+
+ interval = 1
+ count = 0
+ state = self.get_neighbor_state(peer)
+ LOG.info("%s's peer %s state: %s",
+ self.router_id, peer.router_id, state)
+ return
+
+ time.sleep(interval)
+ count += interval
+ raise Exception('timeout')
+
+ cmd = '/sbin/ip route add {0} via {1}'.format(network, next_hop)
+ self.exec_on_ctn(cmd)
+
+ cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
+ self.exec_on_ctn(cmd)
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
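
For reviewers who have not driven these helpers before, here is a rough usage
sketch of the relocated module. It is not part of the patch; it assumes the
classes keep their current constructor signatures, and the bridge class name
and keyword defaults below are my reading of the code above, not something
this change introduces:

    # Hypothetical example, not included in the patch.
    from ryu.lib.docker import docker_base as ctn_base

    cmd = ctn_base.Command()
    out = cmd.sudo('docker ps', try_times=3, interval=1)
    # CommandOut carries the command, returncode and stderr (see above).
    print(out.returncode, out.stderr)

    # A docker-backed test bridge; arguments follow the "Manage a bridge"
    # docstring above.  The class name Bridge is an assumption here.
    br = ctn_base.Bridge(name='br01', subnet='192.168.10.0/24',
                         self_ip=True, fixed_ip='192.168.10.1/24',
                         br_type=ctn_base.BRIDGE_TYPE_DOCKER)
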
diff --git a/ryu/lib/docker/install_docker_test_pkg.sh b/ryu/lib/docker/install_docker_test_pkg.sh
new file mode 100644
index 0000000..a771dfc
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+function add_docker_aptline {
+ sudo apt-get update
+ if ! apt-cache search docker-engine | grep docker-engine; then
+ VER=`lsb_release -r`
+ if echo $VER | grep 12.04; then
+ REL_NAME=precise
+ elif echo $VER | grep 14.04; then
+ REL_NAME=trusty
+ elif echo $VER | grep 15.10; then
+ REL_NAME=wily
+ elif echo $VER | grep 16.04; then
+ REL_NAME=xenial
+ else
+ retrun 1
+ fi
+ RELEASE=ubuntu-$REL_NAME
+ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+ sudo sh -c "echo deb https://apt.dockerproject.org/repo $RELEASE main > /etc/apt/sources.list.d/docker.list"
+ fi
+}
+
+init_variables
+
+if [ $APTLINE_DOCKER -eq 1 ]; then
+ add_docker_aptline
+fi
+
+sudo apt-get update
+if apt-cache search docker-engine | grep docker-engine; then
+ DOCKER_PKG=docker-engine
+else
+ DOCKER_PKG=docker.io
+fi
+sudo apt-get install -y $DOCKER_PKG
+install_depends_pkg
diff --git a/ryu/lib/docker/install_docker_test_pkg_common.sh b/ryu/lib/docker/install_docker_test_pkg_common.sh
new file mode 100644
index 0000000..44a3e10
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg_common.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+set -ex
+
+function init_variables {
+ APTLINE_DOCKER=0
+ DIR_BASE=/tmp
+}
+
+function process_options {
+ local max
+ local i
+ max=$#
+ i=1
+ while [ $i -le $max ]; do
+ case "$1" in
+ -a|--add-docker-aptline)
+ APTLINE_DOCKER=1
+ ;;
+ -d|--download-dir)
+ shift; ((i++))
+ DIR_BASE=$1
+ ;;
+ esac
+ shift; ((i++))
+ done
+}
+
+function install_pipework {
+ if ! which /usr/local/bin/pipework >/dev/null
+ then
+ sudo rm -rf $DIR_BASE/pipework
+ git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework
+ sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework
+ fi
+}
+
+function install_depends_pkg {
+ install_pipework
+}
diff --git a/ryu/lib/docker/install_docker_test_pkg_for_travis.sh b/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
new file mode 100644
index 0000000..d8c3b49
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+init_variables
+
+sudo apt-get update
+install_depends_pkg
diff --git a/ryu/lib/docker/quagga.py b/ryu/lib/docker/quagga.py
new file mode 100644
index 0000000..9b6d218
--- /dev/null
+++ b/ryu/lib/docker/quagga.py
@@ -0,0 +1,332 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/quagga.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+
+import netaddr
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
+
+ WAIT_FOR_BOOT = 1
+ SHARED_VOLUME = '/etc/quagga'
+
+ def __init__(self, name, asn, router_id, ctn_image_name,
+ super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name)
+ self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
+ self.zebra = zebra
+ self._create_config_debian()
+
+ w_time = super(QuaggaBGPContainer,
+ self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
+ return w_time
+
+ rib = []
+ return self.get_global_rib_with_prefix(prefix, rf)
+
+ out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
+ return rib
+
+ read_next = False
+
+ ibgp = False
+ line = line[2:]
+ line = line[1:]
+ ibgp = True
+ continue
+
+ elems = line.split()
+
+ read_next = True
+ prefix = elems[0]
+ continue
+ nexthop = elems[0]
+ prefix = elems[0]
+ nexthop = elems[1]
+ read_next = False
+
+ rib.append({'prefix': prefix, 'nexthop': nexthop,
+ 'ibgp': ibgp})
+
+ return rib
+
+ rib = []
+
+ lines = [line.strip() for line in self.vtysh(
+ 'show bgp {0} unicast {1}'.format(rf, prefix),
+ config=False).split('\n')]
+
+ return rib
+
+ lines = lines[2:]
+
+ lines.pop(0) # another useless line
+ elif lines[0].startswith('Advertised to non peer-group
+ lines = lines[2:] # other useless lines
+ raise Exception('unknown output format {0}'.format(lines))
+
+ aspath = []
+ aspath = [int(asn) for asn in lines[0].split()]
+
+ nexthop = lines[1].split()[0].strip()
+ info = [s.strip(',') for s in lines[2].split()]
+ attrs = []
+ med = info[info.index('metric') + 1]
+ attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
+ 'metric': int(med)})
+ localpref = info[info.index('localpref') + 1]
+ attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
+ 'value': int(localpref)})
+
+ rib.append({'prefix': prefix, 'nexthop': nexthop,
+ 'aspath': aspath, 'attrs': attrs})
+
+ return rib
+
+ raise Exception('not found peer {0}'.format(peer.router_id))
+
+ neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
+
+ info = [l.strip() for l in self.vtysh(
+ 'show bgp neighbors {0}'.format(neigh_addr),
+ config=False).split('\n')]
+
+ raise Exception('unknown format')
+
+ idx1 = info[0].index('BGP neighbor is ')
+ idx2 = info[0].index(',')
+ n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
+ idx1 = info[2].index('= ')
+ state = info[2][idx1 + len('= '):]
+ return base.BGP_FSM_IDLE
+ return base.BGP_FSM_ACTIVE
+ return base.BGP_FSM_ESTABLISHED
+ return state
+
+ raise Exception('not found peer {0}'.format(peer.router_id))
+
+ self.vtysh('clear ip bgp * soft', config=False)
+
+ zebra = 'no'
+ self._create_config_bgp()
+ zebra = 'yes'
+ self._create_config_zebra()
+ self._create_config_daemons(zebra)
+
+ c = base.CmdBuffer()
+ c << 'vtysh_enable=yes'
+ c << 'zebra_options=" --daemon -A 127.0.0.1"'
+ c << 'bgpd_options=" --daemon -A 127.0.0.1"'
+ c << 'ospfd_options=" --daemon -A 127.0.0.1"'
+ c << 'ospf6d_options=" --daemon -A ::1"'
+ c << 'ripd_options=" --daemon -A 127.0.0.1"'
+ c << 'ripngd_options=" --daemon -A ::1"'
+ c << 'isisd_options=" --daemon -A 127.0.0.1"'
+ c << 'babeld_options=" --daemon -A 127.0.0.1"'
+ c << 'watchquagga_enable=yes'
+ c << 'watchquagga_options=(--daemon)'
+ with open('{0}/debian.conf'.format(self.config_dir), 'w') as
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'zebra=%s' % zebra
+ c << 'bgpd=yes'
+ c << 'ospfd=no'
+ c << 'ospf6d=no'
+ c << 'ripd=no'
+ c << 'ripngd=no'
+ c << 'isisd=no'
+ c << 'babeld=no'
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+
+ c = base.CmdBuffer()
+ c << 'hostname bgpd'
+ c << 'password zebra'
+ c << 'router bgp {0}'.format(self.asn)
+ c << 'bgp router-id {0}'.format(self.router_id)
+ if any(info['graceful_restart'] for info in
+ c << 'bgp graceful-restart'
+
+ version = 4
+ version = netaddr.IPNetwork(info['neigh_addr']).version
+ n_addr = info['neigh_addr'].split('/')[0]
+ c << 'no bgp default ipv4-unicast'
+
+ c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
+ c << 'neighbor {0} route-server-client'.format(n_addr)
+ c << 'neighbor {0} route-map {1} {2}'.format(n_addr, p['name'],
+ typ)
+ c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
+ c << 'neighbor {0} passive'.format(n_addr)
+ c << 'address-family ipv6 unicast'
+ c << 'neighbor {0} activate'.format(n_addr)
+ c << 'exit-address-family'
+
+ c << 'network {0}'.format(route['prefix'])
+ c << 'address-family ipv6 unicast'
+ c << 'network {0}'.format(route['prefix'])
+ c << 'exit-address-family'
+ raise Exception(
{0}'.format(route['rf']))
+
+ c << 'address-family ipv6 unicast'
+ c << 'redistribute connected'
+ c << 'exit-address-family'
+ c << 'redistribute connected'
+
+ c << 'access-list {0} {1} {2}'.format(name, policy['type'],
+ policy['match'])
+ c << 'route-map {0} permit 10'.format(name)
+ c << 'match ip address {0}'.format(name)
+ c << 'set metric {0}'.format(policy['med'])
+
+ c << 'debug bgp as4'
+ c << 'debug bgp fsm'
+ c << 'debug bgp updates'
+ c << 'debug bgp events'
+ c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
+
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'hostname zebra'
+ c << 'password zebra'
+ c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
+ c << 'debug zebra packet'
+ c << 'debug zebra kernel'
+ c << 'debug zebra rib'
+ c << ''
+
+ with open('{0}/zebra.conf'.format(self.config_dir), 'w') as
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ cmd = [cmd]
+ cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
+ return self.exec_on_ctn(
+ "vtysh -d bgpd -c 'en' -c 'conf t' -c "
+ "'router bgp {0}' {1}".format(self.asn, cmd),
+ capture=True)
+ return self.exec_on_ctn("vtysh -d bgpd {0}".format(cmd),
+ capture=True)
+
+ daemon = []
+ daemon.append('bgpd')
+ daemon.append('zebra')
+ cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
+ self.exec_on_ctn(cmd, capture=True)
+
+
+ def __init__(self, name, config, ctn_image_name,
+ asn = None
+ router_id = None
+ line = line.strip()
+ asn = int(line[len('router bgp'):].strip())
+ router_id = line[len('bgp router-id'):].strip()
+ raise Exception('asn not in quagga config')
+ raise Exception('router-id not in quagga config')
+ self.config = config
+ super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name, zebra)
+
+ with open(os.path.join(self.config_dir, 'bgpd.conf'), 'w')
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(self.config)
+ f.writelines(self.config)
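
To show how the quagga helper is meant to be used once it lives under
ryu/lib/docker, a minimal sketch follows. It is not part of the patch; the
image name and the get_global_rib_with_prefix call are assumptions based on
the code above:

    # Hypothetical example, not included in the patch.
    from ryu.lib.docker import quagga

    q1 = quagga.QuaggaBGPContainer(name='q1', asn=65001,
                                   router_id='192.168.0.2',
                                   ctn_image_name='quagga')
    q1.run(wait=True)
    # Inspect what the Quagga speaker has learned for one prefix.
    rib = q1.get_global_rib_with_prefix('10.0.0.0/24', 'ipv4')
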
diff --git a/ryu/lib/docker/ryubgp.py b/ryu/lib/docker/ryubgp.py
new file mode 100644
index 0000000..8fe16f4
--- /dev/null
+++ b/ryu/lib/docker/ryubgp.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+import time
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
+
+ WAIT_FOR_BOOT = 1
+ SHARED_VOLUME = '/etc/ryu'
+
+ super(RyuBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name)
+ self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
+ self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
+ self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
+ self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
+
+ c = base.CmdBuffer()
+ c << '[DEFAULT]'
+ c << 'verbose=True'
+ c << 'log_file=/etc/ryu/manager.log'
+ LOG.info("[%s's new config]" % self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'import os'
+ c << ''
+ c << 'BGP = {'
+ c << " 'local_as': %s," % str(self.asn)
+ c << " 'router_id': '%s'," % self.router_id
+ c << " 'neighbors': ["
+ c << " {"
+ n_addr = info['neigh_addr'].split('/')[0]
+ c << " 'address': '%s'," % n_addr
+ c << " 'remote_as': %s," % str(peer.asn)
+ c << " 'enable_ipv4': True,"
+ c << " 'enable_ipv6': True,"
+ c << " 'enable_vpnv4': True,"
+ c << " 'enable_vpnv6': True,"
+ c << ' },'
+ c << ' ],'
+ c << " 'routes': ["
+ c << " {"
+ c << " 'prefix': '%s'," % route['prefix']
+ c << " },"
+ c << " ],"
+ c << "}"
+ log_conf = """LOGGING = {
+
+ # We use python logging package for logging.
+ 'version': 1,
+ 'disable_existing_loggers': False,
+
+ 'formatters': {
+ 'verbose': {
+ 'format': '%(levelname)s %(asctime)s %(module)s ' +
+ '[%(process)d %(thread)d] %(message)s'
+ },
+ 'simple': {
+ 'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
+ '%(message)s'
+ },
+ 'stats': {
+ 'format': '%(message)s'
+ },
+ },
+
+ 'handlers': {
+ # Outputs log to console.
+ 'console': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'simple'
+ },
+ 'console_stats': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'stats'
+ },
+ # Rotates log file when its size reaches 10MB.
+ 'log_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'bgpspeaker.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'verbose'
+ },
+ 'stats_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'statistics_bgps.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'stats'
+ },
+ },
+
+ # Fine-grained control of logging per instance.
+ 'loggers': {
+ 'bgpspeaker': {
+ 'handlers': ['console', 'log_file'],
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'stats': {
+ 'handlers': ['stats_file', 'console_stats'],
+ 'level': 'INFO',
+ 'propagate': False,
+ 'formatter': 'stats',
+ },
+ },
+
+ # Root loggers.
+ 'root': {
+ 'handlers': ['console', 'log_file'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ },
+}"""
+ c << log_conf
+ with open(os.path.join(self.config_dir, 'bgp_conf.py'), 'w') as
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ self._create_config_ryu()
+ self._create_config_ryu_bgp()
+
+ results = self.exec_on_ctn('ps ax')
+ running = False
+ running = True
+ return running
+
+ return True
+ result = False
+ try_times = 3
+ try_times = 1
+ cmd = "ryu-manager --verbose "
+ cmd += "--config-file %s " % self.SHARED_RYU_CONF
+ cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
+ cmd += "ryu.services.protocols.bgp.application"
+ self.exec_on_ctn(cmd, detach=True)
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ return True
+ result = False
+ try_times = 3
+ try_times = 1
+ cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
+ self.exec_on_ctn(cmd)
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ w_time = super(RyuBGPContainer,
+ self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
+ return w_time
+
+ self.stop_ryubgp(retry=True)
+ self.start_ryubgp(retry=True)
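
And a sketch of wiring the two container types together, roughly the way the
integrated BGP tests drive them. Again this is not part of the patch; Bridge,
addif and the keyword names are my assumptions from the docker_base code
above:

    # Hypothetical example, not included in the patch.
    from ryu.lib.docker import docker_base as ctn_base
    from ryu.lib.docker import quagga, ryubgp

    br = ctn_base.Bridge(name='br01', subnet='192.168.10.0/24',
                         br_type=ctn_base.BRIDGE_TYPE_DOCKER)
    r1 = ryubgp.RyuBGPContainer(name='r1', asn=64512,
                                router_id='192.168.0.1',
                                ctn_image_name='ryu')
    q1 = quagga.QuaggaBGPContainer(name='q1', asn=65001,
                                   router_id='192.168.0.2',
                                   ctn_image_name='quagga')
    for ctn in (r1, q1):
        ctn.run(wait=True)
        br.addif(ctn)  # addif is assumed; it attaches the container to the bridge
    r1.add_peer(q1, bridge=br.name)
    q1.add_peer(r1, bridge=br.name)
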
diff --git a/ryu/tests/integrated/common/__init__.py b/ryu/tests/integrated/common/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/ryu/tests/integrated/common/docker_base.py b/ryu/tests/integrated/common/docker_base.py
deleted file mode 100644
index 1ae2cc2..0000000
--- a/ryu/tests/integrated/common/docker_base.py
+++ /dev/null
@@ -1,801 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/base.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import itertools
-import logging
-import os
-import subprocess
-import time
-
-import netaddr
-import six
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_TEST_PREFIX = ''
-DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
-TEST_PREFIX = DEFAULT_TEST_PREFIX
-TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
-
-BGP_FSM_IDLE = 'BGP_FSM_IDLE'
-BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
-BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
-
-BGP_ATTR_TYPE_ORIGIN = 1
-BGP_ATTR_TYPE_AS_PATH = 2
-BGP_ATTR_TYPE_NEXT_HOP = 3
-BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
-BGP_ATTR_TYPE_LOCAL_PREF = 5
-BGP_ATTR_TYPE_COMMUNITIES = 8
-BGP_ATTR_TYPE_ORIGINATOR_ID = 9
-BGP_ATTR_TYPE_CLUSTER_LIST = 10
-BGP_ATTR_TYPE_MP_REACH_NLRI = 14
-BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
-
-BRIDGE_TYPE_DOCKER = 'docker'
-BRIDGE_TYPE_BRCTL = 'brctl'
-BRIDGE_TYPE_OVS = 'ovs'
-
-
- super(CommandError, self).__init__()
- self.out = out
-
-
- e = RuntimeError()
- r = f()
- time.sleep(s)
- return r
- raise e
-
-
- super(CmdBuffer, self).__init__()
- self.delim = delim
-
- self.append(value)
-
- return self.delim.join(self)
-
-
-
- stdout = stdout or ''
- obj = super(CommandOut, cls).__new__(cls, stdout, **kwargs)
- obj.stderr = stderr or ''
- obj.command = command
- obj.returncode = returncode
- return obj
-
-
-
- """Execute a command using subprocess.Popen()
- - out: stdout from subprocess.Popen()
- out has some attributes.
- out.returncode: returncode of subprocess.Popen()
- out.stderr: stderr from subprocess.Popen()
- """
- p_stdout = subprocess.PIPE
- p_stderr = subprocess.PIPE
- p_stdout = None
- p_stderr = None
- pop = subprocess.Popen(cmd, shell=True, executable=executable,
- stdout=p_stdout,
- stderr=p_stderr)
- __stdout, __stderr = pop.communicate()
- _stdout = six.text_type(__stdout, 'utf-8')
- _stderr = six.text_type(__stderr, 'utf-8')
- out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
- return out
-
- out = None
- out = self._execute(cmd, capture=capture)
- LOG.info(out.command)
- return out
- LOG.error("stdout: %s", out)
- LOG.error("stderr: %s", out.stderr)
- break
- time.sleep(interval)
- raise CommandError(out)
-
- cmd = 'sudo %s' % cmd
- return self.execute(cmd, capture=capture,
- try_times=try_times, interval=interval)
-
-
- self.baseimage = baseimage
- self.cmd = Command()
-
- out = self.cmd.sudo('sudo docker images')
- images = []
- images.append(line.split()[0])
- return images
-
- return name in self.get_images()
-
- self.cmd.sudo(
- "docker build -t {0} {1}".format(tagname, dockerfile_dir),
- try_times=3)
-
- return tagname
- self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
-
- def create_quagga(self, tagname='quagga', image=None,
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- pkges = ' '.join([
- 'telnet',
- 'tcpdump',
- 'quagga',
- ])
- use_image = image
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'RUN apt-get update'
- c << 'RUN apt-get install -qy --no-install-recommends %s' % pkges
- c << 'CMD /usr/lib/quagga/bgpd'
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
- self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c),
workdir))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- self.build(tagname, workdir)
- return tagname
-
- def create_ryu(self, tagname='ryu', image=None,
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- workdir_ctn = '/root/osrg/ryu'
- pkges = ' '.join([
- 'tcpdump',
- 'iproute2',
- ])
- use_image = image
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'ADD ryu %s' % workdir_ctn
- install = ' '.join([
- 'RUN apt-get update',
- '&& apt-get install -qy --no-install-recommends %s' %
pkges,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- '&& cd %s' % workdir_ctn,
- # Note: Clean previous builds, because "python setup.py install"
- # might fail if the current directory contains the symlink to
- # Docker host file systems.
- '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
- '&& pip install -r tools/pip-requires -r
tools/optional-requires',
- '&& python setup.py install',
- ])
- c << install
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
- self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c),
workdir))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- self.cmd.execute('cp -r ../ryu %s/' % workdir)
- self.build(tagname, workdir)
- return tagname
-
-
- def __init__(self, name, subnet='', start_ip=None, end_ip=None,
- with_ip=True, self_ip=False,
- fixed_ip=None, reuse=False,
- """Manage a bridge
- - name: bridge name
- - subnet: network cider to be used in this bridge
- - start_ip: start address of an ip to be used in the subnet
- - end_ip: end address of an ip to be used in the subnet
- - with_ip: specify if assign automatically an ip address
- - self_ip: specify if assign an ip address for the bridge
- - fixed_ip: an ip address to be assigned to the bridge
- - reuse: specify if use an existing bridge
- - br_type: One either in a 'docker', 'brctl' or 'ovs'
- """
- self.cmd = Command()
- self.name = name
- if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
- raise Exception("argument error br_type: %s" % br_type)
- self.br_type = br_type
- self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
- self.name = '{0}_{1}'.format(TEST_PREFIX, name)
- self.with_ip = with_ip
- self.subnet = netaddr.IPNetwork(subnet)
- self.start_ip = start_ip
- self.start_ip = netaddr.IPAddress(self.subnet.first)
- self.end_ip = end_ip
- self.end_ip = netaddr.IPAddress(self.subnet.last)
-
- for host in netaddr.IPRange(self.start_ip,
- yield host
- self._ip_generator = _ip_gen()
- # throw away first network address
- self.next_ip_address()
-
- self.self_ip = self_ip
- self.ip_addr = fixed_ip
- self.ip_addr = self.next_ip_address()
- gw = "--gateway %s" % self.ip_addr.split('/')[0]
- v6 = ''
- v6 = '--ipv6'
- cmd = ("docker network create --driver bridge %s "
- "%s --subnet %s %s" % (v6, gw, subnet, self.name))
- cmd = "ip link add {0} type bridge".format(
self.name)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- cmd = "ovs-vsctl add-br {0}".format(self.name)
- raise ValueError('Unsupported br_type: %s' % self.br_type)
- self.delete()
- self.execute(cmd, sudo=True, retry=True)
- try_several_times(f)
- self.execute("ip link set up dev {0}".format(self.name),
- sudo=True, retry=True)
-
- ips = self.check_br_addr(self.name)
- self.execute(
- "ip addr del {0} dev {1}".format(ip,
self.name),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- sudo=True, retry=True)
- self.execute(
- "ip addr add {0} dev {1}".format(self.ip_addr,
self.name
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
),
- sudo=True, retry=True)
- self.ctns = []
-
- out = self.execute('docker network ls', sudo=True, retry=True)
- bridges = []
- bridges.append(line.split()[1])
- return bridges
-
- out = self.execute('brctl show', retry=True)
- bridges = []
- bridges.append(line.split()[0])
- return bridges
-
- out = self.execute('ovs-vsctl list-br', sudo=True, retry=True)
- return out.splitlines()
-
- return self.get_bridges_dc()
- return self.get_bridges_brctl()
- return self.get_bridges_ovs()
-
- return self.name in self.get_bridges()
-
- m = self.cmd.sudo
- m = self.cmd.execute
- return m(cmd, capture=capture, try_times=3, interval=1)
- return m(cmd, capture=capture)
-
- ips = {}
- cmd = "ip a show dev %s" % br
- elems = [e.strip() for e in line.strip().split(' ')]
- ips[4] = elems[1]
- elems = [e.strip() for e in line.strip().split(' ')]
- ips[6] = elems[1]
- return ips
-
- return "{0}/{1}".format(next(self._ip_generator),
- self.subnet.prefixlen)
-
- name = ctn.next_if_name()
- self.ctns.append(ctn)
- ip_address = None
- ipv4 = None
- ipv6 = None
- ip_address = self.next_ip_address()
- ip_address_ip = ip_address.split('/')[0]
- version = 4
- version = 6
- opt_ip = "--ip %s" % ip_address_ip
- ipv4 = ip_address
- opt_ip = "--ip6 %s" % ip_address_ip
- ipv6 = ip_address
- cmd = "docker network connect %s %s %s" % (
- opt_ip, self.name, ctn.docker_name())
- self.execute(cmd, sudo=True)
- ctn.set_addr_info(bridge=self.name, ipv4=ipv4, ipv6=ipv6,
- ifname=name)
- ip_address = self.next_ip_address()
- version = 4
- version = 6
- ctn.pipework(self, ip_address, name, version=version)
- ctn.pipework(self, '0/0', name)
- return ip_address
-
- return
- self.execute("docker network rm %s" % self.name,
- sudo=True, retry=True)
- self.execute("ip link set down dev %s" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ip link delete %s type bridge" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ovs-vsctl del-br %s" % self.name,
- sudo=True, retry=True)
-
-
- self.name = name
- self.image = image
- self.shared_volumes = []
- self.ip_addrs = []
- self.ip6_addrs = []
- self.is_running = False
- self.eths = []
- self.id = None
-
- self.cmd = Command()
- self.remove()
-
- return self.name
- return '{0}_{1}'.format(TEST_PREFIX, self.name)
-
- return self.id
- return self.docker_name()
-
- name = 'eth{0}'.format(len(self.eths) + 1)
- self.eths.append(name)
- return name
-
- def set_addr_info(self, bridge, ipv4=None, ipv6=None,
- self.ip_addrs.append((ifname, ipv4, bridge))
- self.ip6_addrs.append((ifname, ipv6, bridge))
-
- addrinfo = {}
- ip_addrs = self.ip_addrs
- ip_addrs = self.ip6_addrs
- return None
- addrinfo[addr[1]] = addr[0]
- return addrinfo
-
- m = self.cmd.sudo
- m = self.cmd.execute
- return m(cmd, capture=capture, try_times=3, interval=1)
- return m(cmd, capture=capture)
-
- return self.cmd.sudo(cmd, capture=capture, try_times=3, interval=1)
- return self.cmd.sudo(cmd, capture=capture)
-
- name = self.docker_name()
- flag = '-d' if detach else ''
- return self.dcexec('docker exec {0} {1} {2}'.format(
- flag, name, cmd), capture=capture)
-
- cmd = 'docker ps --no-trunc=true'
- cmd += ' --all=true'
- out = self.dcexec(cmd, retry=True)
- containers = []
- containers.append(line.split()[-1])
- return containers
-
- return self.docker_name() in self.get_containers(allctn=allctn)
-
- c = CmdBuffer(' ')
- c << "docker run --privileged=true"
- c << "-v {0}:{1}".format(sv[0], sv[1])
- c << "--name {0} --hostname {0} -id
{1}".format(self.docker_name()
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
,
- self.image)
- self.id = self.dcexec(str(c), retry=True)
- self.is_running = True
- self.exec_on_ctn("ip li set up dev lo")
- ipv4 = None
- ipv6 = None
- for line in self.exec_on_ctn("ip a show dev
- elems = [e.strip() for e in line.strip().split(' ')]
- ipv4 = elems[1]
- elems = [e.strip() for e in line.strip().split(' ')]
- ipv6 = elems[1]
- self.set_addr_info(bridge='docker0', ipv4=ipv4, ipv6=ipv6,
- ifname='eth0')
- return 0
-
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- LOG.warning('Call run() before pipeworking')
- return
- c = CmdBuffer(' ')
- c << "pipework {0}".format(bridge.name)
-
- c << "-i {0}".format(intf_name)
- intf_name = "eth1"
- ipv4 = None
- ipv6 = None
- ipv4 = ip_addr
- c << '-a 6'
- ipv6 = ip_addr
- c << "{0} {1}".format(self.docker_name(), ip_addr)
- self.set_addr_info(bridge=bridge.name, ipv4=ipv4, ipv6=ipv6,
- ifname=intf_name)
- self.execute(str(c), sudo=True, retry=True)
-
- cmd = "docker inspect -f '{{.State.Pid}}' %s" % self.docker_name()
- return int(self.dcexec(cmd))
- return -1
-
- interface = "eth0"
- filename = "{0}/{1}.dump".format(
- self.shared_volumes[0][1], interface)
- self.exec_on_ctn(
- "tcpdump -i {0} -w {1}".format(interface, filename),
- detach=True)
-
-
-
- WAIT_FOR_BOOT = 1
- RETRY_INTERVAL = 5
- DEFAULT_PEER_ARG = {'neigh_addr': '',
- 'passwd': None,
- 'vpn': False,
- 'flowspec': False,
- 'is_rs_client': False,
- 'is_rr_client': False,
- 'cluster_id': None,
- 'policies': None,
- 'passive': False,
- 'local_addr': '',
- 'as2': False,
- 'graceful_restart': None,
- 'local_as': None,
- 'prefix_limit': None}
- default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
- DEFAULT_ROUTE_ARG = {'prefix': None,
- 'rf': 'ipv4',
- 'attr': None,
- 'next-hop': None,
- 'as-path': None,
- 'community': None,
- 'med': None,
- 'local-pref': None,
- 'extended-community': None,
- 'matchs': None,
- 'thens': None}
- default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
-
- self.config_dir = TEST_BASE_DIR
- self.config_dir = os.path.join(self.config_dir, TEST_PREFIX)
- self.config_dir = os.path.join(self.config_dir, name)
- self.asn = asn
- self.router_id = router_id
- self.peers = {}
- self.routes = {}
- self.policies = {}
- super(BGPContainer, self).__init__(name, ctn_image_name)
- self.execute(
- 'rm -rf {0}'.format(self.config_dir), sudo=True)
- self.execute('mkdir -p {0}'.format(self.config_dir))
- self.execute('chmod 777 {0}'.format(self.config_dir))
-
- return str({'name': self.name, 'asn': self.asn,
- 'router_id': self.router_id})
-
- self.create_config()
- super(BGPContainer, self).run()
- time.sleep(w_time)
- return w_time
-
- def add_peer(self, peer, bridge='', reload_config=True, v6=False,
- peer_info = peer_info or {}
- self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
- self.peers[peer].update(peer_info)
- peer_keys = sorted(self.peers[peer].keys())
- raise Exception("argument error peer_info: %s" %
peer_info)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
-
- neigh_addr = ''
- local_addr = ''
- it = itertools.product(self.ip_addrs, peer.ip_addrs)
- it = itertools.product(self.ip6_addrs, peer.ip6_addrs)
-
- continue
- neigh_addr = you[1]
- local_addr = me[1]
- addr, mask = local_addr.split('/')
- local_addr = "{0}%{1}/{2}".format(addr, me[0], mask)
- break
-
- raise Exception('peer {0} seems not ip reachable'.format(peer))
-
- self.peers[peer]['policies'] = {}
-
- self.peers[peer]['neigh_addr'] = neigh_addr
- self.peers[peer]['local_addr'] = local_addr
- self.create_config()
- self.reload_config()
-
- del self.peers[peer]
- self.create_config()
- self.reload_config()
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- return self.execute('cat {0}/*.log'.format(self.config_dir))
-
- route_info = route_info or {}
- self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
- self.routes[route].update(route_info)
- route_keys = sorted(self.routes[route].keys())
- raise Exception("argument error route_info: %s" %
route_info)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- self.routes[route]['prefix'] = route
- self.create_config()
- self.reload_config()
-
- def add_policy(self, policy, peer, typ, default='accept',
- self.set_default_policy(peer, typ, default)
- self.define_policy(policy)
- self.assign_policy(peer, policy, typ)
- self.create_config()
- self.reload_config()
-
- if (typ in ['in', 'out', 'import', 'export'] and
- self.peers[peer]['default-policy'] = {}
- self.peers[peer]['default-policy'][typ] = default
- raise Exception('wrong type or default')
-
- self.policies[policy['name']] = policy
-
- raise Exception('peer {0} not found'.format(peer.name))
- name = policy['name']
- raise Exception('policy {0} not found'.format(name))
- self.peers[peer]['policies'][typ] = policy
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- version = netaddr.IPNetwork(prefix).version
- addr = prefix.split('/')[0]
- ping_cmd = 'ping'
- ping_cmd = 'ping6'
- raise Exception(
- 'unsupported route family: {0}'.format(version))
- cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format(
- ping_cmd, addr)
- interval = 1
- count = 0
- res = self.exec_on_ctn(cmd)
- LOG.info(res)
- break
- time.sleep(interval)
- count += interval
- raise Exception('timeout')
- return True
-
- interval = 1
- count = 0
- state = self.get_neighbor_state(peer)
- LOG.info("%s's peer %s state: %s",
- self.router_id, peer.router_id, state)
- return
-
- time.sleep(interval)
- count += interval
- raise Exception('timeout')
-
- cmd = '/sbin/ip route add {0} via {1}'.format(network, next_hop)
- self.exec_on_ctn(cmd)
-
- cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
- self.exec_on_ctn(cmd)
-
- raise NotImplementedError()
-
- raise NotImplementedError()
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg.sh b/ryu/tests/integrated/common/install_docker_test_pkg.sh
deleted file mode 100644
index a771dfc..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-function add_docker_aptline {
- sudo apt-get update
- if ! apt-cache search docker-engine | grep docker-engine; then
- VER=`lsb_release -r`
- if echo $VER | grep 12.04; then
- REL_NAME=precise
- elif echo $VER | grep 14.04; then
- REL_NAME=trusty
- elif echo $VER | grep 15.10; then
- REL_NAME=wily
- elif echo $VER | grep 16.04; then
- REL_NAME=xenial
- else
- retrun 1
- fi
- RELEASE=ubuntu-$REL_NAME
- sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
- sudo sh -c "echo deb https://apt.dockerproject.org/repo $RELEASE main > /etc/apt/sources.list.d/docker.list"
- fi
-}
-
-init_variables
-
-if [ $APTLINE_DOCKER -eq 1 ]; then
- add_docker_aptline
-fi
-
-sudo apt-get update
-if apt-cache search docker-engine | grep docker-engine; then
- DOCKER_PKG=docker-engine
-else
- DOCKER_PKG=docker.io
-fi
-sudo apt-get install -y $DOCKER_PKG
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh b/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
deleted file mode 100644
index 44a3e10..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-set -ex
-
-function init_variables {
- APTLINE_DOCKER=0
- DIR_BASE=/tmp
-}
-
-function process_options {
- local max
- local i
- max=$#
- i=1
- while [ $i -le $max ]; do
- case "$1" in
- -a|--add-docker-aptline)
- APTLINE_DOCKER=1
- ;;
- -d|--download-dir)
- shift; ((i++))
- DIR_BASE=$1
- ;;
- esac
- shift; ((i++))
- done
-}
-
-function install_pipework {
- if ! which /usr/local/bin/pipework >/dev/null
- then
- sudo rm -rf $DIR_BASE/pipework
- git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework
- sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework
- fi
-}
-
-function install_depends_pkg {
- install_pipework
-}
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh b/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
deleted file mode 100644
index d8c3b49..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-init_variables
-
-sudo apt-get update
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/quagga.py b/ryu/tests/integrated/common/quagga.py
deleted file mode 100644
index 9b6d218..0000000
--- a/ryu/tests/integrated/common/quagga.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/quagga.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-
-import netaddr
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/quagga'
-
- def __init__(self, name, asn, router_id, ctn_image_name,
- super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
- self.zebra = zebra
- self._create_config_debian()
-
- w_time = super(QuaggaBGPContainer,
- self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- rib = []
- return self.get_global_rib_with_prefix(prefix, rf)
-
- out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
- return rib
-
- read_next = False
-
- ibgp = False
- line = line[2:]
- line = line[1:]
- ibgp = True
- continue
-
- elems = line.split()
-
- read_next = True
- prefix = elems[0]
- continue
- nexthop = elems[0]
- prefix = elems[0]
- nexthop = elems[1]
- read_next = False
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'ibgp': ibgp})
-
- return rib
-
- rib = []
-
- lines = [line.strip() for line in self.vtysh(
- 'show bgp {0} unicast {1}'.format(rf, prefix),
- config=False).split('\n')]
-
- return rib
-
- lines = lines[2:]
-
- lines.pop(0) # another useless line
- elif lines[0].startswith('Advertised to non peer-group
- lines = lines[2:] # other useless lines
- raise Exception('unknown output format {0}'.format(lines))
-
- aspath = []
- aspath = [int(asn) for asn in lines[0].split()]
-
- nexthop = lines[1].split()[0].strip()
- info = [s.strip(',') for s in lines[2].split()]
- attrs = []
- med = info[info.index('metric') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
- 'metric': int(med)})
- localpref = info[info.index('localpref') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
- 'value': int(localpref)})
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'aspath': aspath, 'attrs': attrs})
-
- return rib
-
- raise Exception('not found peer {0}'.format(peer.router_id))
-
- neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
-
- info = [l.strip() for l in self.vtysh(
- 'show bgp neighbors {0}'.format(neigh_addr),
- config=False).split('\n')]
-
- raise Exception('unknown format')
-
- idx1 = info[0].index('BGP neighbor is ')
- idx2 = info[0].index(',')
- n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
- idx1 = info[2].index('= ')
- state = info[2][idx1 + len('= '):]
- return base.BGP_FSM_IDLE
- return base.BGP_FSM_ACTIVE
- return base.BGP_FSM_ESTABLISHED
- return state
-
- raise Exception('not found peer {0}'.format(peer.router_id))
-
- self.vtysh('clear ip bgp * soft', config=False)
-
- zebra = 'no'
- self._create_config_bgp()
- zebra = 'yes'
- self._create_config_zebra()
- self._create_config_daemons(zebra)
-
- c = base.CmdBuffer()
- c << 'vtysh_enable=yes'
- c << 'zebra_options=" --daemon -A 127.0.0.1"'
- c << 'bgpd_options=" --daemon -A 127.0.0.1"'
- c << 'ospfd_options=" --daemon -A 127.0.0.1"'
- c << 'ospf6d_options=" --daemon -A ::1"'
- c << 'ripd_options=" --daemon -A 127.0.0.1"'
- c << 'ripngd_options=" --daemon -A ::1"'
- c << 'isisd_options=" --daemon -A 127.0.0.1"'
- c << 'babeld_options=" --daemon -A 127.0.0.1"'
- c << 'watchquagga_enable=yes'
- c << 'watchquagga_options=(--daemon)'
- with open('{0}/debian.conf'.format(self.config_dir), 'w') as
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'zebra=%s' % zebra
- c << 'bgpd=yes'
- c << 'ospfd=no'
- c << 'ospf6d=no'
- c << 'ripd=no'
- c << 'ripngd=no'
- c << 'isisd=no'
- c << 'babeld=no'
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
-
- c = base.CmdBuffer()
- c << 'hostname bgpd'
- c << 'password zebra'
- c << 'router bgp {0}'.format(self.asn)
- c << 'bgp router-id {0}'.format(self.router_id)
- if any(info['graceful_restart'] for info in
- c << 'bgp graceful-restart'
-
- version = 4
- version = netaddr.IPNetwork(info['neigh_addr']).version
- n_addr = info['neigh_addr'].split('/')[0]
- c << 'no bgp default ipv4-unicast'
-
- c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
- c << 'neighbor {0} route-server-client'.format(n_addr)
- c << 'neighbor {0} route-map {1} {2}'.format(n_addr, p['name'],
- typ)
- c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
- c << 'neighbor {0} passive'.format(n_addr)
- c << 'address-family ipv6 unicast'
- c << 'neighbor {0} activate'.format(n_addr)
- c << 'exit-address-family'
-
- c << 'network {0}'.format(route['prefix'])
- c << 'address-family ipv6 unicast'
- c << 'network {0}'.format(route['prefix'])
- c << 'exit-address-family'
- raise Exception(
{0}'.format(route['rf']))
-
- c << 'address-family ipv6 unicast'
- c << 'redistribute connected'
- c << 'exit-address-family'
- c << 'redistribute connected'
-
- c << 'access-list {0} {1} {2}'.format(name, policy['type'],
- policy['match'])
- c << 'route-map {0} permit 10'.format(name)
- c << 'match ip address {0}'.format(name)
- c << 'set metric {0}'.format(policy['med'])
-
- c << 'debug bgp as4'
- c << 'debug bgp fsm'
- c << 'debug bgp updates'
- c << 'debug bgp events'
- c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
-
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'hostname zebra'
- c << 'password zebra'
- c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
- c << 'debug zebra packet'
- c << 'debug zebra kernel'
- c << 'debug zebra rib'
- c << ''
-
- with open('{0}/zebra.conf'.format(self.config_dir), 'w') as
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- cmd = [cmd]
- cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
- return self.exec_on_ctn(
- "vtysh -d bgpd -c 'en' -c 'conf t' -c "
- "'router bgp {0}' {1}".format(self.asn, cmd),
- capture=True)
- return self.exec_on_ctn("vtysh -d bgpd {0}".format(cmd),
- capture=True)
-
- daemon = []
- daemon.append('bgpd')
- daemon.append('zebra')
- cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
- self.exec_on_ctn(cmd, capture=True)
-
-
- def __init__(self, name, config, ctn_image_name,
- asn = None
- router_id = None
- line = line.strip()
- asn = int(line[len('router bgp'):].strip())
- router_id = line[len('bgp router-id'):].strip()
- raise Exception('asn not in quagga config')
- raise Exception('router-id not in quagga config')
- self.config = config
- super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name, zebra)
-
- with open(os.path.join(self.config_dir, 'bgpd.conf'), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(self.config)
- f.writelines(self.config)
diff --git a/ryu/tests/integrated/common/ryubgp.py b/ryu/tests/integrated/common/ryubgp.py
deleted file mode 100644
index 8fe16f4..0000000
--- a/ryu/tests/integrated/common/ryubgp.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-import time
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/ryu'
-
- super(RyuBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
- self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
- self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
- self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
-
- c = base.CmdBuffer()
- c << '[DEFAULT]'
- c << 'verbose=True'
- c << 'log_file=/etc/ryu/manager.log'
- LOG.info("[%s's new config]" % self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'import os'
- c << ''
- c << 'BGP = {'
- c << " 'local_as': %s," % str(self.asn)
- c << " 'router_id': '%s'," % self.router_id
- c << " 'neighbors': ["
- c << " {"
- n_addr = info['neigh_addr'].split('/')[0]
- c << " 'address': '%s'," % n_addr
- c << " 'remote_as': %s," % str(peer.asn)
- c << " 'enable_ipv4': True,"
- c << " 'enable_ipv6': True,"
- c << " 'enable_vpnv4': True,"
- c << " 'enable_vpnv6': True,"
- c << ' },'
- c << ' ],'
- c << " 'routes': ["
- c << " {"
- c << " 'prefix': '%s'," % route['prefix']
- c << " },"
- c << " ],"
- c << "}"
- log_conf = """LOGGING = {
-
- # We use python logging package for logging.
- 'version': 1,
- 'disable_existing_loggers': False,
-
- 'formatters': {
- 'verbose': {
- 'format': '%(levelname)s %(asctime)s %(module)s ' +
- '[%(process)d %(thread)d] %(message)s'
- },
- 'simple': {
- 'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
- '%(message)s'
- },
- 'stats': {
- 'format': '%(message)s'
- },
- },
-
- 'handlers': {
- # Outputs log to console.
- 'console': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'simple'
- },
- 'console_stats': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'stats'
- },
- # Rotates log file when its size reaches 10MB.
- 'log_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'bgpspeaker.log'),
- 'maxBytes': '10000000',
- 'formatter': 'verbose'
- },
- 'stats_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'statistics_bgps.log'),
- 'maxBytes': '10000000',
- 'formatter': 'stats'
- },
- },
-
- # Fine-grained control of logging per instance.
- 'loggers': {
- 'bgpspeaker': {
- 'handlers': ['console', 'log_file'],
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'stats': {
- 'handlers': ['stats_file', 'console_stats'],
- 'level': 'INFO',
- 'propagate': False,
- 'formatter': 'stats',
- },
- },
-
- # Root loggers.
- 'root': {
- 'handlers': ['console', 'log_file'],
- 'level': 'DEBUG',
- 'propagate': True,
- },
-}"""
- c << log_conf
- with open(os.path.join(self.config_dir, 'bgp_conf.py'), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- self._create_config_ryu()
- self._create_config_ryu_bgp()
-
- results = self.exec_on_ctn('ps ax')
- running = False
- running = True
- return running
-
- return True
- result = False
- try_times = 3
- try_times = 1
- cmd = "ryu-manager --verbose "
- cmd += "--config-file %s " % self.SHARED_RYU_CONF
- cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
- cmd += "ryu.services.protocols.bgp.application"
- self.exec_on_ctn(cmd, detach=True)
- result = True
- break
- time.sleep(1)
- return result
-
- return True
- result = False
- try_times = 3
- try_times = 1
- cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
- self.exec_on_ctn(cmd)
- result = True
- break
- time.sleep(1)
- return result
-
- w_time = super(RyuBGPContainer,
- self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- self.stop_ryubgp(retry=True)
- self.start_ryubgp(retry=True)
diff --git a/tests/integrated/bgp/base.py b/tests/integrated/bgp/base.py
index 26fa396..2f210de 100644
--- a/tests/integrated/bgp/base.py
+++ b/tests/integrated/bgp/base.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest
-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga
LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/base_ip6.py b/tests/integrated/bgp/base_ip6.py
index be26faf..d867920 100644
--- a/tests/integrated/bgp/base_ip6.py
+++ b/tests/integrated/bgp/base_ip6.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest
-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga
LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/test_basic.py b/tests/integrated/bgp/test_basic.py
index 5817d44..d1eda39 100644
--- a/tests/integrated/bgp/test_basic.py
+++ b/tests/integrated/bgp/test_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import time
-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base
from . import base
diff --git a/tests/integrated/bgp/test_ip6_basic.py b/tests/integrated/bgp/test_ip6_basic.py
index 40461a5..911a0b5 100644
--- a/tests/integrated/bgp/test_ip6_basic.py
+++ b/tests/integrated/bgp/test_ip6_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import time
-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base
from . import base_ip6 as base
--
2.7.4
--
FUJITA Tomonori
2017-07-13 00:13:22 UTC
Permalink
CC'ed Iwase,

On Thu, 13 Jul 2017 09:15:25 +0900
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Hi Yamamoto-San,
On Wed, 12 Jul 2017 23:43:25 +0900
Post by Takashi YAMAMOTO
this breaks openstack neutron_dynamic_routing, doesn't it?
Thank you for the advice. I am aware of that.
So as soon as the ryu version containing this commit is released,
I am going to push a patch to fix the breakage in neutron-dynamic-routing.
Is it difficult to avoid breaking it?
E.g. make it try-import the new and old locations.
Can we just leave the testing directory alone? Nobody has complained about
the size so far. I don't like creating issues for a non-existent problem.

Opinions?
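For reference, a minimal sketch of the try-import approach yamamoto suggests,
as it might look on the consumer side (the two sets of module paths simply
mirror the old and new locations in this patch; the shim itself is
illustrative, not something that already exists in neutron-dynamic-routing):

    # Prefer the new location, fall back to the old one, so that both
    # released and not-yet-released Ryu versions keep working.
    try:
        from ryu.lib.docker import docker_base as ctn_base
        from ryu.lib.docker import quagga
        from ryu.lib.docker import ryubgp
    except ImportError:
        from ryu.tests.integrated.common import docker_base as ctn_base
        from ryu.tests.integrated.common import quagga
        from ryu.tests.integrated.common import ryubgp

Whichever branch succeeds provides the same names that
tests/integrated/bgp/base.py imports, so a consumer could follow either
layout during the transition.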
Iwase Yusuke
2017-07-13 00:44:30 UTC
Permalink
Hi,
Post by FUJITA Tomonori
CC'ed Iwase,
On Thu, 13 Jul 2017 09:15:25 +0900
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Hi Yamamoto-San,
On Wed, 12 Jul 2017 23:43:25 +0900
Post by Takashi YAMAMOTO
this breaks openstack neutron_dynamic_routing, doesn't it?
Thank you for the advice. I am aware of that.
So as soon as the ryu version containing this commit is released,
I am going to push a patch to fix the breakage in neutron-dynamic-routing.
Is it difficult to avoid breaking it?
E.g. make it try-import the new and old locations.
Can we just leave the testing directory alone? Nobody has complained about
the size so far. I don't like creating issues for a non-existent problem.
Opinions?
Yes, I think we can. Not many users complain about it, I guess.
A user suggested splitting Ryu before, but there was no response...
[Ryu-devel] Splitting Ryu
https://www.mail-archive.com/ryu-***@lists.sourceforge.net/msg13454.html

BTW, which modules or APIs are defined as the "public" libraries or APIs of Ryu?
And which are "internal" (and can be changed or relocated)?
Does anyone know the policy?
Must "all APIs" be treated as public, with the usage by other projects taken into account?
FUJITA Tomonori
2017-07-13 00:52:01 UTC
Permalink
On Thu, 13 Jul 2017 09:44:30 +0900
Post by Iwase Yusuke
Hi,
Post by FUJITA Tomonori
CC'ed Iwase,
On Thu, 13 Jul 2017 09:15:25 +0900
On Thu, Jul 13, 2017 at 7:04 AM, fumihiko kakuma
Post by fumihiko kakuma
Hi Yamamoto-San,
On Wed, 12 Jul 2017 23:43:25 +0900
Post by Takashi YAMAMOTO
this breaks openstack neutron_dynamic_routing, doesn't it?
Thank you for the advice. I am aware of that.
So as soon as the ryu version containing this commit is released,
I am going to push a patch to fix the breakage in neutron-dynamic-routing.
Is it difficult to avoid breaking it?
E.g. make it try-import the new and old locations.
Can we just leave the testing directory alone? Nobody has complained about
the size so far. I don't like creating issues for a non-existent problem.
Opinions?
Yes, I think we can. Not many users complain about it, I guess.
A user suggested splitting Ryu before, but there was no response...
[Ryu-devel] Splitting Ryu
As far as I know, he wants to avoid version updates caused by
non-OpenFlow changes (e.g., BGP).
Post by Iwase Yusuke
BTW, which modules or APIs are defined as the "public" libraries or APIs of Ryu?
And which are "internal" (and can be changed or relocated)?
Does anyone know the policy?
Must "all APIs" be treated as public, with the usage by other projects taken into account?
For me, no APIs are written in stone. But I want to avoid disrupting
major users like OpenStack.
Iwase Yusuke
2017-07-13 01:49:04 UTC
Permalink
Post by FUJITA Tomonori
On Thu, 13 Jul 2017 09:44:30 +0900
Post by Iwase Yusuke
Hi,
Post by FUJITA Tomonori
CC'ed Iwase,
On Thu, 13 Jul 2017 09:15:25 +0900
On Thu, Jul 13, 2017 at 7:04 AM, fumihiko kakuma
Post by fumihiko kakuma
Hi Yamamoto-San,
On Wed, 12 Jul 2017 23:43:25 +0900
Post by Takashi YAMAMOTO
this breaks openstack neutron_dynamic_routing, doesn't it?
Thank you for the advice. I am aware of that.
So as soon as the ryu version containing this commit is released,
I am going to push a patch to fix the breakage in neutron-dynamic-routing.
Is it difficult to avoid breaking it?
E.g. make it try-import the new and old locations.
Can we just leave the testing directory alone? Nobody has complained about
the size so far. I don't like creating issues for a non-existent problem.
Opinions?
Yes, I think we can. Not many users complain about it, I guess.
A user suggested splitting Ryu before, but there was no response...
[Ryu-devel] Splitting Ryu
As far as I know, he wants to avoid version updates caused by
non-OpenFlow changes (e.g., BGP).
That seems to be his purpose, but the cause of this problem is that
Ryu has many features other than OpenFlow and bundles many components
in itself.
So I tried to separate the components into some independent pieces.
Post by FUJITA Tomonori
Post by Iwase Yusuke
BTW, which modules or APIs are defined as the "public" libraries or APIs of Ryu?
And which are "internal" (and can be changed or relocated)?
Does anyone know the policy?
Must "all APIs" be treated as public, with the usage by other projects taken into account?
For me, no APIs are written in stone. But I want to avoid disrupting
major users like OpenStack.
Thanks, I remember the same issue being discussed before...
I just never thought the OpenStack project would use Ryu's testing libraries...
fumihiko kakuma
2017-07-13 01:00:12 UTC
Permalink
On Thu, 13 Jul 2017 09:15:25 +0900
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Hi Yamamoto-San,
On Wed, 12 Jul 2017 23:43:25 +0900
Post by Takashi YAMAMOTO
this breaks openstack neutron_dynamic_routing, doesn't it?
Thank you for the advice. I am aware of that.
So as soon as the ryu version containing this commit is released,
I am going to push a patch to fix the breakage in neutron-dynamic-routing.
Is it difficult to avoid breaking it?
E.g. make it try-import the new and old locations.
This change only breaks the scenario tests in neutron-dynamic-routing,
and they are currently a non-voting job.
Is that not acceptable?
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Thanks,
Kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
This patch requires the v2 patch of Iwase-San.
---
.travis.yml | 2 +-
ryu/lib/docker/__init__.py | 0
ryu/lib/docker/docker_base.py | 801 +++++++++++++++++++++
ryu/lib/docker/install_docker_test_pkg.sh | 43 ++
ryu/lib/docker/install_docker_test_pkg_common.sh | 39 +
.../docker/install_docker_test_pkg_for_travis.sh | 12 +
ryu/lib/docker/quagga.py | 332 +++++++++
ryu/lib/docker/ryubgp.py | 212 ++++++
ryu/tests/integrated/common/__init__.py | 0
ryu/tests/integrated/common/docker_base.py | 801 ---------------------
.../integrated/common/install_docker_test_pkg.sh | 43 --
.../common/install_docker_test_pkg_common.sh | 39 -
.../common/install_docker_test_pkg_for_travis.sh | 12 -
ryu/tests/integrated/common/quagga.py | 332 ---------
ryu/tests/integrated/common/ryubgp.py | 212 ------
tests/integrated/bgp/base.py | 6 +-
tests/integrated/bgp/base_ip6.py | 6 +-
tests/integrated/bgp/test_basic.py | 2 +-
tests/integrated/bgp/test_ip6_basic.py | 2 +-
19 files changed, 1448 insertions(+), 1448 deletions(-)
create mode 100644 ryu/lib/docker/__init__.py
create mode 100644 ryu/lib/docker/docker_base.py
create mode 100644 ryu/lib/docker/install_docker_test_pkg.sh
create mode 100644 ryu/lib/docker/install_docker_test_pkg_common.sh
create mode 100644 ryu/lib/docker/install_docker_test_pkg_for_travis.sh
create mode 100644 ryu/lib/docker/quagga.py
create mode 100644 ryu/lib/docker/ryubgp.py
delete mode 100644 ryu/tests/integrated/common/__init__.py
delete mode 100644 ryu/tests/integrated/common/docker_base.py
delete mode 100644 ryu/tests/integrated/common/install_docker_test_pkg.sh
delete mode 100644 ryu/tests/integrated/common/install_docker_test_pkg_common.sh
delete mode 100644 ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
delete mode 100644 ryu/tests/integrated/common/quagga.py
delete mode 100644 ryu/tests/integrated/common/ryubgp.py
diff --git a/.travis.yml b/.travis.yml
index 9e5474a..cd35aac 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,7 +16,7 @@ sudo: required # Required to enable Docker service
- pip install tox coveralls
- - bash ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
+ - bash ryu/lib/docker/install_docker_test_pkg_for_travis.sh
- NOSE_VERBOSE=0 tox -e $TOX_ENV
diff --git a/ryu/lib/docker/__init__.py b/ryu/lib/docker/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ryu/lib/docker/docker_base.py b/ryu/lib/docker/docker_base.py
new file mode 100644
index 0000000..1ae2cc2
--- /dev/null
+++ b/ryu/lib/docker/docker_base.py
@@ -0,0 +1,801 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/base.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import itertools
+import logging
+import os
+import subprocess
+import time
+
+import netaddr
+import six
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_TEST_PREFIX = ''
+DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
+TEST_PREFIX = DEFAULT_TEST_PREFIX
+TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
+
+BGP_FSM_IDLE = 'BGP_FSM_IDLE'
+BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
+BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
+
+BGP_ATTR_TYPE_ORIGIN = 1
+BGP_ATTR_TYPE_AS_PATH = 2
+BGP_ATTR_TYPE_NEXT_HOP = 3
+BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
+BGP_ATTR_TYPE_LOCAL_PREF = 5
+BGP_ATTR_TYPE_COMMUNITIES = 8
+BGP_ATTR_TYPE_ORIGINATOR_ID = 9
+BGP_ATTR_TYPE_CLUSTER_LIST = 10
+BGP_ATTR_TYPE_MP_REACH_NLRI = 14
+BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
+
+BRIDGE_TYPE_DOCKER = 'docker'
+BRIDGE_TYPE_BRCTL = 'brctl'
+BRIDGE_TYPE_OVS = 'ovs'
+
+
+ super(CommandError, self).__init__()
+ self.out = out
+
+
+ e = RuntimeError()
+ r = f()
+ time.sleep(s)
+ return r
+ raise e
+
+
+ super(CmdBuffer, self).__init__()
+ self.delim = delim
+
+ self.append(value)
+
+ return self.delim.join(self)
+
+
+
+ stdout = stdout or ''
+ obj = super(CommandOut, cls).__new__(cls, stdout, **kwargs)
+ obj.stderr = stderr or ''
+ obj.command = command
+ obj.returncode = returncode
+ return obj
+
+
+
+ """Execute a command using subprocess.Popen()
+ - out: stdout from subprocess.Popen()
+ out has some attributes.
+ out.returncode: returncode of subprocess.Popen()
+ out.stderr: stderr from subprocess.Popen()
+ """
+ p_stdout = subprocess.PIPE
+ p_stderr = subprocess.PIPE
+ p_stdout = None
+ p_stderr = None
+ pop = subprocess.Popen(cmd, shell=True, executable=executable,
+ stdout=p_stdout,
+ stderr=p_stderr)
+ __stdout, __stderr = pop.communicate()
+ _stdout = six.text_type(__stdout, 'utf-8')
+ _stderr = six.text_type(__stderr, 'utf-8')
+ out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
+ return out
+
+ out = None
+ out = self._execute(cmd, capture=capture)
+ LOG.info(out.command)
+ return out
+ LOG.error("stdout: %s", out)
+ LOG.error("stderr: %s", out.stderr)
+ break
+ time.sleep(interval)
+ raise CommandError(out)
+
+ cmd = 'sudo %s' % cmd
+ return self.execute(cmd, capture=capture,
+ try_times=try_times, interval=interval)
+
+
+ self.baseimage = baseimage
+ self.cmd = Command()
+
+ out = self.cmd.sudo('sudo docker images')
+ images = []
+ images.append(line.split()[0])
+ return images
+
+ return name in self.get_images()
+
+ self.cmd.sudo(
+ "docker build -t {0} {1}".format(tagname, dockerfile_dir),
+ try_times=3)
+
+ return tagname
+ self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
+
+ def create_quagga(self, tagname='quagga', image=None,
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ pkges = ' '.join([
+ 'telnet',
+ 'tcpdump',
+ 'quagga',
+ ])
+ use_image = image
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'RUN apt-get update'
+ c << 'RUN apt-get install -qy --no-install-recommends %s' % pkges
+ c << 'CMD /usr/lib/quagga/bgpd'
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
+ self.build(tagname, workdir)
+ return tagname
+
+ def create_ryu(self, tagname='ryu', image=None,
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ workdir_ctn = '/root/osrg/ryu'
+ pkges = ' '.join([
+ 'tcpdump',
+ 'iproute2',
+ ])
+ use_image = image
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'ADD ryu %s' % workdir_ctn
+ install = ' '.join([
+ 'RUN apt-get update',
+ '&& apt-get install -qy --no-install-recommends %s' % pkges,
+ '&& cd %s' % workdir_ctn,
+ # Note: Clean previous builds, because "python setup.py install"
+ # might fail if the current directory contains the symlink to
+ # Docker host file systems.
+ '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
+ '&& pip install -r tools/pip-requires -r tools/optional-requires',
+ '&& python setup.py install',
+ ])
+ c << install
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
+ self.cmd.execute('cp -r ../ryu %s/' % workdir)
+ self.build(tagname, workdir)
+ return tagname
+
+
+ def __init__(self, name, subnet='', start_ip=None, end_ip=None,
+ with_ip=True, self_ip=False,
+ fixed_ip=None, reuse=False,
+ """Manage a bridge
+ - name: bridge name
+ - subnet: network cider to be used in this bridge
+ - start_ip: start address of an ip to be used in the subnet
+ - end_ip: end address of an ip to be used in the subnet
+ - with_ip: specify if assign automatically an ip address
+ - self_ip: specify if assign an ip address for the bridge
+ - fixed_ip: an ip address to be assigned to the bridge
+ - reuse: specify if use an existing bridge
+ - br_type: One either in a 'docker', 'brctl' or 'ovs'
+ """
+ self.cmd = Command()
+ self.name = name
+ if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
+ raise Exception("argument error br_type: %s" % br_type)
+ self.br_type = br_type
+ self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
+ self.name = '{0}_{1}'.format(TEST_PREFIX, name)
+ self.with_ip = with_ip
+ self.subnet = netaddr.IPNetwork(subnet)
+ self.start_ip = start_ip
+ self.start_ip = netaddr.IPAddress(self.subnet.first)
+ self.end_ip = end_ip
+ self.end_ip = netaddr.IPAddress(self.subnet.last)
+
+ for host in netaddr.IPRange(self.start_ip,
+ yield host
+ self._ip_generator = _ip_gen()
+ # throw away first network address
+ self.next_ip_address()
+
+ self.self_ip = self_ip
+ self.ip_addr = fixed_ip
+ self.ip_addr = self.next_ip_address()
+ gw = "--gateway %s" % self.ip_addr.split('/')[0]
+ v6 = ''
+ v6 = '--ipv6'
+ cmd = ("docker network create --driver bridge %s "
+ "%s --subnet %s %s" % (v6, gw, subnet, self.name))
+ cmd = "ip link add {0} type bridge".format(self.name)
+ cmd = "ovs-vsctl add-br {0}".format(self.name)
+ raise ValueError('Unsupported br_type: %s' % self.br_type)
+ self.delete()
+ self.execute(cmd, sudo=True, retry=True)
+ try_several_times(f)
+ self.execute("ip link set up dev {0}".format(self.name),
+ sudo=True, retry=True)
+
+ ips = self.check_br_addr(self.name)
+ self.execute(
+ "ip addr del {0} dev {1}".format(ip, self.name),
+ sudo=True, retry=True)
+ self.execute(
+ "ip addr add {0} dev {1}".format(self.ip_addr, self.name),
+ sudo=True, retry=True)
+ self.ctns = []
+
+ out = self.execute('docker network ls', sudo=True, retry=True)
+ bridges = []
+ bridges.append(line.split()[1])
+ return bridges
+
+ out = self.execute('brctl show', retry=True)
+ bridges = []
+ bridges.append(line.split()[0])
+ return bridges
+
+ out = self.execute('ovs-vsctl list-br', sudo=True, retry=True)
+ return out.splitlines()
+
+ return self.get_bridges_dc()
+ return self.get_bridges_brctl()
+ return self.get_bridges_ovs()
+
+ return self.name in self.get_bridges()
+
+ m = self.cmd.sudo
+ m = self.cmd.execute
+ return m(cmd, capture=capture, try_times=3, interval=1)
+ return m(cmd, capture=capture)
+
+ ips = {}
+ cmd = "ip a show dev %s" % br
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ips[4] = elems[1]
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ips[6] = elems[1]
+ return ips
+
+ return "{0}/{1}".format(next(self._ip_generator),
+ self.subnet.prefixlen)
+
+ name = ctn.next_if_name()
+ self.ctns.append(ctn)
+ ip_address = None
+ ipv4 = None
+ ipv6 = None
+ ip_address = self.next_ip_address()
+ ip_address_ip = ip_address.split('/')[0]
+ version = 4
+ version = 6
+ opt_ip = "--ip %s" % ip_address_ip
+ ipv4 = ip_address
+ opt_ip = "--ip6 %s" % ip_address_ip
+ ipv6 = ip_address
+ cmd = "docker network connect %s %s %s" % (
+ opt_ip, self.name, ctn.docker_name())
+ self.execute(cmd, sudo=True)
+ ctn.set_addr_info(bridge=self.name, ipv4=ipv4, ipv6=ipv6,
+ ifname=name)
+ ip_address = self.next_ip_address()
+ version = 4
+ version = 6
+ ctn.pipework(self, ip_address, name, version=version)
+ ctn.pipework(self, '0/0', name)
+ return ip_address
+
+ return
+ self.execute("docker network rm %s" % self.name,
+ sudo=True, retry=True)
+ self.execute("ip link set down dev %s" % self.name,
+ sudo=True, retry=True)
+ self.execute(
+ "ip link delete %s type bridge" % self.name,
+ sudo=True, retry=True)
+ self.execute(
+ "ovs-vsctl del-br %s" % self.name,
+ sudo=True, retry=True)
+
+
+ self.name = name
+ self.image = image
+ self.shared_volumes = []
+ self.ip_addrs = []
+ self.ip6_addrs = []
+ self.is_running = False
+ self.eths = []
+ self.id = None
+
+ self.cmd = Command()
+ self.remove()
+
+ return self.name
+ return '{0}_{1}'.format(TEST_PREFIX, self.name)
+
+ return self.id
+ return self.docker_name()
+
+ name = 'eth{0}'.format(len(self.eths) + 1)
+ self.eths.append(name)
+ return name
+
+ def set_addr_info(self, bridge, ipv4=None, ipv6=None,
+ self.ip_addrs.append((ifname, ipv4, bridge))
+ self.ip6_addrs.append((ifname, ipv6, bridge))
+
+ addrinfo = {}
+ ip_addrs = self.ip_addrs
+ ip_addrs = self.ip6_addrs
+ return None
+ addrinfo[addr[1]] = addr[0]
+ return addrinfo
+
+ m = self.cmd.sudo
+ m = self.cmd.execute
+ return m(cmd, capture=capture, try_times=3, interval=1)
+ return m(cmd, capture=capture)
+
+ return self.cmd.sudo(cmd, capture=capture, try_times=3, interval=1)
+ return self.cmd.sudo(cmd, capture=capture)
+
+ name = self.docker_name()
+ flag = '-d' if detach else ''
+ return self.dcexec('docker exec {0} {1} {2}'.format(
+ flag, name, cmd), capture=capture)
+
+ cmd = 'docker ps --no-trunc=true'
+ cmd += ' --all=true'
+ out = self.dcexec(cmd, retry=True)
+ containers = []
+ containers.append(line.split()[-1])
+ return containers
+
+ return self.docker_name() in self.get_containers(allctn=allctn)
+
+ c = CmdBuffer(' ')
+ c << "docker run --privileged=true"
+ c << "-v {0}:{1}".format(sv[0], sv[1])
+ c << "--name {0} --hostname {0} -id {1}".format(self.docker_name(),
+ self.image)
+ self.id = self.dcexec(str(c), retry=True)
+ self.is_running = True
+ self.exec_on_ctn("ip li set up dev lo")
+ ipv4 = None
+ ipv6 = None
+ for line in self.exec_on_ctn("ip a show dev
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ipv4 = elems[1]
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ipv6 = elems[1]
+ self.set_addr_info(bridge='docker0', ipv4=ipv4, ipv6=ipv6,
+ ifname='eth0')
+ return 0
+
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True)
+ self.is_running = False
+ return out
+
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
+ self.is_running = False
+ return out
+
+ LOG.warning('Call run() before pipeworking')
+ return
+ c = CmdBuffer(' ')
+ c << "pipework {0}".format(bridge.name)
+
+ c << "-i {0}".format(intf_name)
+ intf_name = "eth1"
+ ipv4 = None
+ ipv6 = None
+ ipv4 = ip_addr
+ c << '-a 6'
+ ipv6 = ip_addr
+ c << "{0} {1}".format(self.docker_name(), ip_addr)
+ self.set_addr_info(bridge=bridge.name, ipv4=ipv4, ipv6=ipv6,
+ ifname=intf_name)
+ self.execute(str(c), sudo=True, retry=True)
+
+ cmd = "docker inspect -f '{{.State.Pid}}' %s" % self.docker_name()
+ return int(self.dcexec(cmd))
+ return -1
+
+ interface = "eth0"
+ filename = "{0}/{1}.dump".format(
+ self.shared_volumes[0][1], interface)
+ self.exec_on_ctn(
+ "tcpdump -i {0} -w {1}".format(interface, filename),
+ detach=True)
+
+
+
+ WAIT_FOR_BOOT = 1
+ RETRY_INTERVAL = 5
+ DEFAULT_PEER_ARG = {'neigh_addr': '',
+ 'passwd': None,
+ 'vpn': False,
+ 'flowspec': False,
+ 'is_rs_client': False,
+ 'is_rr_client': False,
+ 'cluster_id': None,
+ 'policies': None,
+ 'passive': False,
+ 'local_addr': '',
+ 'as2': False,
+ 'graceful_restart': None,
+ 'local_as': None,
+ 'prefix_limit': None}
+ default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
+ DEFAULT_ROUTE_ARG = {'prefix': None,
+ 'rf': 'ipv4',
+ 'attr': None,
+ 'next-hop': None,
+ 'as-path': None,
+ 'community': None,
+ 'med': None,
+ 'local-pref': None,
+ 'extended-community': None,
+ 'matchs': None,
+ 'thens': None}
+ default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
+
+ self.config_dir = TEST_BASE_DIR
+ self.config_dir = os.path.join(self.config_dir, TEST_PREFIX)
+ self.config_dir = os.path.join(self.config_dir, name)
+ self.asn = asn
+ self.router_id = router_id
+ self.peers = {}
+ self.routes = {}
+ self.policies = {}
+ super(BGPContainer, self).__init__(name, ctn_image_name)
+ self.execute(
+ 'rm -rf {0}'.format(self.config_dir), sudo=True)
+ self.execute('mkdir -p {0}'.format(self.config_dir))
+ self.execute('chmod 777 {0}'.format(self.config_dir))
+
+ return str({'name': self.name, 'asn': self.asn,
+ 'router_id': self.router_id})
+
+ self.create_config()
+ super(BGPContainer, self).run()
+ time.sleep(w_time)
+ return w_time
+
+ def add_peer(self, peer, bridge='', reload_config=True, v6=False,
+ peer_info = peer_info or {}
+ self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
+ self.peers[peer].update(peer_info)
+ peer_keys = sorted(self.peers[peer].keys())
+ raise Exception("argument error peer_info: %s" % peer_info)
+
+ neigh_addr = ''
+ local_addr = ''
+ it = itertools.product(self.ip_addrs, peer.ip_addrs)
+ it = itertools.product(self.ip6_addrs, peer.ip6_addrs)
+
+ continue
+ neigh_addr = you[1]
+ local_addr = me[1]
+ addr, mask = local_addr.split('/')
+ local_addr = "{0}%{1}/{2}".format(addr, me[0], mask)
+ break
+
+ raise Exception('peer {0} seems not ip reachable'.format(peer))
+
+ self.peers[peer]['policies'] = {}
+
+ self.peers[peer]['neigh_addr'] = neigh_addr
+ self.peers[peer]['local_addr'] = local_addr
+ self.create_config()
+ self.reload_config()
+
+ del self.peers[peer]
+ self.create_config()
+ self.reload_config()
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ return self.execute('cat {0}/*.log'.format(self.config_dir))
+
+ route_info = route_info or {}
+ self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
+ self.routes[route].update(route_info)
+ route_keys = sorted(self.routes[route].keys())
+ raise Exception("argument error route_info: %s" % route_info)
+ self.routes[route]['prefix'] = route
+ self.create_config()
+ self.reload_config()
+
+ def add_policy(self, policy, peer, typ, default='accept',
+ self.set_default_policy(peer, typ, default)
+ self.define_policy(policy)
+ self.assign_policy(peer, policy, typ)
+ self.create_config()
+ self.reload_config()
+
+ if (typ in ['in', 'out', 'import', 'export'] and
+ self.peers[peer]['default-policy'] = {}
+ self.peers[peer]['default-policy'][typ] = default
+ raise Exception('wrong type or default')
+
+ self.policies[policy['name']] = policy
+
+ raise Exception('peer {0} not found'.format(peer.name))
+ name = policy['name']
+ raise Exception('policy {0} not found'.format(name))
+ self.peers[peer]['policies'][typ] = policy
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ version = netaddr.IPNetwork(prefix).version
+ addr = prefix.split('/')[0]
+ ping_cmd = 'ping'
+ ping_cmd = 'ping6'
+ raise Exception(
+ 'unsupported route family: {0}'.format(version))
+ cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format(
+ ping_cmd, addr)
+ interval = 1
+ count = 0
+ res = self.exec_on_ctn(cmd)
+ LOG.info(res)
+ break
+ time.sleep(interval)
+ count += interval
+ raise Exception('timeout')
+ return True
+
+ interval = 1
+ count = 0
+ state = self.get_neighbor_state(peer)
+ LOG.info("%s's peer %s state: %s",
+ self.router_id, peer.router_id, state)
+ return
+
+ time.sleep(interval)
+ count += interval
+ raise Exception('timeout')
+
+ cmd = '/sbin/ip route add {0} via {1}'.format(network, next_hop)
+ self.exec_on_ctn(cmd)
+
+ cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
+ self.exec_on_ctn(cmd)
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
diff --git a/ryu/lib/docker/install_docker_test_pkg.sh b/ryu/lib/docker/install_docker_test_pkg.sh
new file mode 100644
index 0000000..a771dfc
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+function add_docker_aptline {
+ sudo apt-get update
+ if ! apt-cache search docker-engine | grep docker-engine; then
+ VER=`lsb_release -r`
+ if echo $VER | grep 12.04; then
+ REL_NAME=precise
+ elif echo $VER | grep 14.04; then
+ REL_NAME=trusty
+ elif echo $VER | grep 15.10; then
+ REL_NAME=wily
+ elif echo $VER | grep 16.04; then
+ REL_NAME=xenial
+ else
+        return 1
+ fi
+ RELEASE=ubuntu-$REL_NAME
+ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+ sudo sh -c "echo deb https://apt.dockerproject.org/repo $RELEASE main > /etc/apt/sources.list.d/docker.list"
+ fi
+}
+
+init_variables
+
+if [ $APTLINE_DOCKER -eq 1 ]; then
+ add_docker_aptline
+fi
+
+sudo apt-get update
+if apt-cache search docker-engine | grep docker-engine; then
+ DOCKER_PKG=docker-engine
+else
+ DOCKER_PKG=docker.io
+fi
+sudo apt-get install -y $DOCKER_PKG
+install_depends_pkg
diff --git a/ryu/lib/docker/install_docker_test_pkg_common.sh b/ryu/lib/docker/install_docker_test_pkg_common.sh
new file mode 100644
index 0000000..44a3e10
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg_common.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+set -ex
+
+function init_variables {
+ APTLINE_DOCKER=0
+ DIR_BASE=/tmp
+}
+
+function process_options {
+ local max
+ local i
+ max=$#
+ i=1
+ while [ $i -le $max ]; do
+ case "$1" in
+ -a|--add-docker-aptline)
+ APTLINE_DOCKER=1
+ ;;
+ -d|--download-dir)
+ shift; ((i++))
+ DIR_BASE=$1
+ ;;
+ esac
+ shift; ((i++))
+ done
+}
+
+function install_pipework {
+ if ! which /usr/local/bin/pipework >/dev/null
+ then
+ sudo rm -rf $DIR_BASE/pipework
+ git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework
+ sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework
+ fi
+}
+
+function install_depends_pkg {
+ install_pipework
+}
diff --git a/ryu/lib/docker/install_docker_test_pkg_for_travis.sh b/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
new file mode 100644
index 0000000..d8c3b49
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+init_variables
+
+sudo apt-get update
+install_depends_pkg
diff --git a/ryu/lib/docker/quagga.py b/ryu/lib/docker/quagga.py
new file mode 100644
index 0000000..9b6d218
--- /dev/null
+++ b/ryu/lib/docker/quagga.py
@@ -0,0 +1,332 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/quagga.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+
+import netaddr
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
+
+ WAIT_FOR_BOOT = 1
+ SHARED_VOLUME = '/etc/quagga'
+
+ def __init__(self, name, asn, router_id, ctn_image_name,
+ super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name)
+ self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
+ self.zebra = zebra
+ self._create_config_debian()
+
+ w_time = super(QuaggaBGPContainer,
+ self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
+ return w_time
+
+ rib = []
+ return self.get_global_rib_with_prefix(prefix, rf)
+
+ out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
+ return rib
+
+ read_next = False
+
+ ibgp = False
+ line = line[2:]
+ line = line[1:]
+ ibgp = True
+ continue
+
+ elems = line.split()
+
+ read_next = True
+ prefix = elems[0]
+ continue
+ nexthop = elems[0]
+ prefix = elems[0]
+ nexthop = elems[1]
+ read_next = False
+
+ rib.append({'prefix': prefix, 'nexthop': nexthop,
+ 'ibgp': ibgp})
+
+ return rib
+
+ rib = []
+
+ lines = [line.strip() for line in self.vtysh(
+ 'show bgp {0} unicast {1}'.format(rf, prefix),
+ config=False).split('\n')]
+
+ return rib
+
+ lines = lines[2:]
+
+ lines.pop(0) # another useless line
+ elif lines[0].startswith('Advertised to non peer-group
+ lines = lines[2:] # other useless lines
+ raise Exception('unknown output format {0}'.format(lines))
+
+ aspath = []
+ aspath = [int(asn) for asn in lines[0].split()]
+
+ nexthop = lines[1].split()[0].strip()
+ info = [s.strip(',') for s in lines[2].split()]
+ attrs = []
+ med = info[info.index('metric') + 1]
+ attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
+ 'metric': int(med)})
+ localpref = info[info.index('localpref') + 1]
+ attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
+ 'value': int(localpref)})
+
+ rib.append({'prefix': prefix, 'nexthop': nexthop,
+ 'aspath': aspath, 'attrs': attrs})
+
+ return rib
+
+ raise Exception('not found peer {0}'.format(peer.router_id))
+
+ neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
+
+ info = [l.strip() for l in self.vtysh(
+ 'show bgp neighbors {0}'.format(neigh_addr),
+ config=False).split('\n')]
+
+ raise Exception('unknown format')
+
+ idx1 = info[0].index('BGP neighbor is ')
+ idx2 = info[0].index(',')
+ n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
+ idx1 = info[2].index('= ')
+ state = info[2][idx1 + len('= '):]
+ return base.BGP_FSM_IDLE
+ return base.BGP_FSM_ACTIVE
+ return base.BGP_FSM_ESTABLISHED
+ return state
+
+ raise Exception('not found peer {0}'.format(peer.router_id))
+
+ self.vtysh('clear ip bgp * soft', config=False)
+
+ zebra = 'no'
+ self._create_config_bgp()
+ zebra = 'yes'
+ self._create_config_zebra()
+ self._create_config_daemons(zebra)
+
+ c = base.CmdBuffer()
+ c << 'vtysh_enable=yes'
+ c << 'zebra_options=" --daemon -A 127.0.0.1"'
+ c << 'bgpd_options=" --daemon -A 127.0.0.1"'
+ c << 'ospfd_options=" --daemon -A 127.0.0.1"'
+ c << 'ospf6d_options=" --daemon -A ::1"'
+ c << 'ripd_options=" --daemon -A 127.0.0.1"'
+ c << 'ripngd_options=" --daemon -A ::1"'
+ c << 'isisd_options=" --daemon -A 127.0.0.1"'
+ c << 'babeld_options=" --daemon -A 127.0.0.1"'
+ c << 'watchquagga_enable=yes'
+ c << 'watchquagga_options=(--daemon)'
+ with open('{0}/debian.conf'.format(self.config_dir), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'zebra=%s' % zebra
+ c << 'bgpd=yes'
+ c << 'ospfd=no'
+ c << 'ospf6d=no'
+ c << 'ripd=no'
+ c << 'ripngd=no'
+ c << 'isisd=no'
+ c << 'babeld=no'
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+
+ c = base.CmdBuffer()
+ c << 'hostname bgpd'
+ c << 'password zebra'
+ c << 'router bgp {0}'.format(self.asn)
+ c << 'bgp router-id {0}'.format(self.router_id)
+ if any(info['graceful_restart'] for info in
+ c << 'bgp graceful-restart'
+
+ version = 4
+ version = netaddr.IPNetwork(info['neigh_addr']).version
+ n_addr = info['neigh_addr'].split('/')[0]
+ c << 'no bgp default ipv4-unicast'
+
+ c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
+ c << 'neighbor {0} route-server-client'.format(n_addr)
+ c << 'neighbor {0} route-map {1} {2}'.format(n_addr, p['name'],
+ typ)
+ c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
+ c << 'neighbor {0} passive'.format(n_addr)
+ c << 'address-family ipv6 unicast'
+ c << 'neighbor {0} activate'.format(n_addr)
+ c << 'exit-address-family'
+
+ c << 'network {0}'.format(route['prefix'])
+ c << 'address-family ipv6 unicast'
+ c << 'network {0}'.format(route['prefix'])
+ c << 'exit-address-family'
+ raise Exception(
+ 'unsupported route family: {0}'.format(route['rf']))
+
+ c << 'address-family ipv6 unicast'
+ c << 'redistribute connected'
+ c << 'exit-address-family'
+ c << 'redistribute connected'
+
+ c << 'access-list {0} {1} {2}'.format(name, policy['type'],
+ policy['match'])
+ c << 'route-map {0} permit 10'.format(name)
+ c << 'match ip address {0}'.format(name)
+ c << 'set metric {0}'.format(policy['med'])
+
+ c << 'debug bgp as4'
+ c << 'debug bgp fsm'
+ c << 'debug bgp updates'
+ c << 'debug bgp events'
+ c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
+
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'hostname zebra'
+ c << 'password zebra'
+ c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
+ c << 'debug zebra packet'
+ c << 'debug zebra kernel'
+ c << 'debug zebra rib'
+ c << ''
+
+ with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ cmd = [cmd]
+ cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
+ return self.exec_on_ctn(
+ "vtysh -d bgpd -c 'en' -c 'conf t' -c "
+ "'router bgp {0}' {1}".format(self.asn, cmd),
+ capture=True)
+ return self.exec_on_ctn("vtysh -d bgpd {0}".format(cmd),
+ capture=True)
+
+ daemon = []
+ daemon.append('bgpd')
+ daemon.append('zebra')
+ cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
+ self.exec_on_ctn(cmd, capture=True)
+
+
+ def __init__(self, name, config, ctn_image_name,
+ asn = None
+ router_id = None
+ line = line.strip()
+ asn = int(line[len('router bgp'):].strip())
+ router_id = line[len('bgp router-id'):].strip()
+ raise Exception('asn not in quagga config')
+ raise Exception('router-id not in quagga config')
+ self.config = config
+ super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name, zebra)
+
+ with open(os.path.join(self.config_dir, 'bgpd.conf'), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(self.config)
+ f.writelines(self.config)
diff --git a/ryu/lib/docker/ryubgp.py b/ryu/lib/docker/ryubgp.py
new file mode 100644
index 0000000..8fe16f4
--- /dev/null
+++ b/ryu/lib/docker/ryubgp.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+import time
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
+
+ WAIT_FOR_BOOT = 1
+ SHARED_VOLUME = '/etc/ryu'
+
+ super(RyuBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name)
+ self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
+ self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
+ self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
+ self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
+
+ c = base.CmdBuffer()
+ c << '[DEFAULT]'
+ c << 'verbose=True'
+ c << 'log_file=/etc/ryu/manager.log'
+ LOG.info("[%s's new config]" % self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'import os'
+ c << ''
+ c << 'BGP = {'
+ c << " 'local_as': %s," % str(self.asn)
+ c << " 'router_id': '%s'," % self.router_id
+ c << " 'neighbors': ["
+ c << " {"
+ n_addr = info['neigh_addr'].split('/')[0]
+ c << " 'address': '%s'," % n_addr
+ c << " 'remote_as': %s," % str(peer.asn)
+ c << " 'enable_ipv4': True,"
+ c << " 'enable_ipv6': True,"
+ c << " 'enable_vpnv4': True,"
+ c << " 'enable_vpnv6': True,"
+ c << ' },'
+ c << ' ],'
+ c << " 'routes': ["
+ c << " {"
+ c << " 'prefix': '%s'," % route['prefix']
+ c << " },"
+ c << " ],"
+ c << "}"
+ log_conf = """LOGGING = {
+
+ # We use python logging package for logging.
+ 'version': 1,
+ 'disable_existing_loggers': False,
+
+ 'formatters': {
+ 'verbose': {
+ 'format': '%(levelname)s %(asctime)s %(module)s ' +
+ '[%(process)d %(thread)d] %(message)s'
+ },
+ 'simple': {
+ 'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
+ '%(message)s'
+ },
+ 'stats': {
+ 'format': '%(message)s'
+ },
+ },
+
+ 'handlers': {
+ # Outputs log to console.
+ 'console': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'simple'
+ },
+ 'console_stats': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'stats'
+ },
+ # Rotates log file when its size reaches 10MB.
+ 'log_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'bgpspeaker.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'verbose'
+ },
+ 'stats_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'statistics_bgps.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'stats'
+ },
+ },
+
+ # Fine-grained control of logging per instance.
+ 'loggers': {
+ 'bgpspeaker': {
+ 'handlers': ['console', 'log_file'],
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'stats': {
+ 'handlers': ['stats_file', 'console_stats'],
+ 'level': 'INFO',
+ 'propagate': False,
+ 'formatter': 'stats',
+ },
+ },
+
+ # Root loggers.
+ 'root': {
+ 'handlers': ['console', 'log_file'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ },
+}"""
+ c << log_conf
+ with open(os.path.join(self.config_dir, 'bgp_conf.py'), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ self._create_config_ryu()
+ self._create_config_ryu_bgp()
+
+ results = self.exec_on_ctn('ps ax')
+ running = False
+ running = True
+ return running
+
+ return True
+ result = False
+ try_times = 3
+ try_times = 1
+ cmd = "ryu-manager --verbose "
+ cmd += "--config-file %s " % self.SHARED_RYU_CONF
+ cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
+ cmd += "ryu.services.protocols.bgp.application"
+ self.exec_on_ctn(cmd, detach=True)
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ return True
+ result = False
+ try_times = 3
+ try_times = 1
+ cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
+ self.exec_on_ctn(cmd)
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ w_time = super(RyuBGPContainer,
+ self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
+ return w_time
+
+ self.stop_ryubgp(retry=True)
+ self.start_ryubgp(retry=True)
diff --git a/ryu/tests/integrated/common/__init__.py b/ryu/tests/integrated/common/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/ryu/tests/integrated/common/docker_base.py b/ryu/tests/integrated/common/docker_base.py
deleted file mode 100644
index 1ae2cc2..0000000
--- a/ryu/tests/integrated/common/docker_base.py
+++ /dev/null
@@ -1,801 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/base.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import itertools
-import logging
-import os
-import subprocess
-import time
-
-import netaddr
-import six
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_TEST_PREFIX = ''
-DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
-TEST_PREFIX = DEFAULT_TEST_PREFIX
-TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
-
-BGP_FSM_IDLE = 'BGP_FSM_IDLE'
-BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
-BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
-
-BGP_ATTR_TYPE_ORIGIN = 1
-BGP_ATTR_TYPE_AS_PATH = 2
-BGP_ATTR_TYPE_NEXT_HOP = 3
-BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
-BGP_ATTR_TYPE_LOCAL_PREF = 5
-BGP_ATTR_TYPE_COMMUNITIES = 8
-BGP_ATTR_TYPE_ORIGINATOR_ID = 9
-BGP_ATTR_TYPE_CLUSTER_LIST = 10
-BGP_ATTR_TYPE_MP_REACH_NLRI = 14
-BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
-
-BRIDGE_TYPE_DOCKER = 'docker'
-BRIDGE_TYPE_BRCTL = 'brctl'
-BRIDGE_TYPE_OVS = 'ovs'
-
-
- super(CommandError, self).__init__()
- self.out = out
-
-
- e = RuntimeError()
- r = f()
- time.sleep(s)
- return r
- raise e
-
-
- super(CmdBuffer, self).__init__()
- self.delim = delim
-
- self.append(value)
-
- return self.delim.join(self)
-
-
-
- stdout = stdout or ''
- obj = super(CommandOut, cls).__new__(cls, stdout, **kwargs)
- obj.stderr = stderr or ''
- obj.command = command
- obj.returncode = returncode
- return obj
-
-
-
- """Execute a command using subprocess.Popen()
- - out: stdout from subprocess.Popen()
- out has some attributes.
- out.returncode: returncode of subprocess.Popen()
- out.stderr: stderr from subprocess.Popen()
- """
- p_stdout = subprocess.PIPE
- p_stderr = subprocess.PIPE
- p_stdout = None
- p_stderr = None
- pop = subprocess.Popen(cmd, shell=True, executable=executable,
- stdout=p_stdout,
- stderr=p_stderr)
- __stdout, __stderr = pop.communicate()
- _stdout = six.text_type(__stdout, 'utf-8')
- _stderr = six.text_type(__stderr, 'utf-8')
- out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
- return out
-
- out = None
- out = self._execute(cmd, capture=capture)
- LOG.info(out.command)
- return out
- LOG.error("stdout: %s", out)
- LOG.error("stderr: %s", out.stderr)
- break
- time.sleep(interval)
- raise CommandError(out)
-
- cmd = 'sudo %s' % cmd
- return self.execute(cmd, capture=capture,
- try_times=try_times, interval=interval)
-
-
- self.baseimage = baseimage
- self.cmd = Command()
-
- out = self.cmd.sudo('sudo docker images')
- images = []
- images.append(line.split()[0])
- return images
-
- return name in self.get_images()
-
- self.cmd.sudo(
- "docker build -t {0} {1}".format(tagname, dockerfile_dir),
- try_times=3)
-
- return tagname
- self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
-
- def create_quagga(self, tagname='quagga', image=None,
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- pkges = ' '.join([
- 'telnet',
- 'tcpdump',
- 'quagga',
- ])
- use_image = image
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'RUN apt-get update'
- c << 'RUN apt-get install -qy --no-install-recommends %s' % pkges
- c << 'CMD /usr/lib/quagga/bgpd'
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
- self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
- self.build(tagname, workdir)
- return tagname
-
- def create_ryu(self, tagname='ryu', image=None,
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- workdir_ctn = '/root/osrg/ryu'
- pkges = ' '.join([
- 'tcpdump',
- 'iproute2',
- ])
- use_image = image
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'ADD ryu %s' % workdir_ctn
- install = ' '.join([
- 'RUN apt-get update',
- '&& apt-get install -qy --no-install-recommends %s' % pkges,
- '&& cd %s' % workdir_ctn,
- # Note: Clean previous builds, because "python setup.py install"
- # might fail if the current directory contains the symlink to
- # Docker host file systems.
- '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
- '&& pip install -r tools/pip-requires -r tools/optional-requires',
- '&& python setup.py install',
- ])
- c << install
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
- self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
- self.cmd.execute('cp -r ../ryu %s/' % workdir)
- self.build(tagname, workdir)
- return tagname
-
-
- def __init__(self, name, subnet='', start_ip=None, end_ip=None,
- with_ip=True, self_ip=False,
- fixed_ip=None, reuse=False,
- """Manage a bridge
- - name: bridge name
- - subnet: network cider to be used in this bridge
- - start_ip: start address of an ip to be used in the subnet
- - end_ip: end address of an ip to be used in the subnet
- - with_ip: specify if assign automatically an ip address
- - self_ip: specify if assign an ip address for the bridge
- - fixed_ip: an ip address to be assigned to the bridge
- - reuse: specify if use an existing bridge
- - br_type: One either in a 'docker', 'brctl' or 'ovs'
- """
- self.cmd = Command()
- self.name = name
- if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
- raise Exception("argument error br_type: %s" % br_type)
- self.br_type = br_type
- self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
- self.name = '{0}_{1}'.format(TEST_PREFIX, name)
- self.with_ip = with_ip
- self.subnet = netaddr.IPNetwork(subnet)
- self.start_ip = start_ip
- self.start_ip = netaddr.IPAddress(self.subnet.first)
- self.end_ip = end_ip
- self.end_ip = netaddr.IPAddress(self.subnet.last)
-
- for host in netaddr.IPRange(self.start_ip,
- yield host
- self._ip_generator = _ip_gen()
- # throw away first network address
- self.next_ip_address()
-
- self.self_ip = self_ip
- self.ip_addr = fixed_ip
- self.ip_addr = self.next_ip_address()
- gw = "--gateway %s" % self.ip_addr.split('/')[0]
- v6 = ''
- v6 = '--ipv6'
- cmd = ("docker network create --driver bridge %s "
- "%s --subnet %s %s" % (v6, gw, subnet, self.name))
- cmd = "ip link add {0} type bridge".format(self.name)
- cmd = "ovs-vsctl add-br {0}".format(self.name)
- raise ValueError('Unsupported br_type: %s' % self.br_type)
- self.delete()
- self.execute(cmd, sudo=True, retry=True)
- try_several_times(f)
- self.execute("ip link set up dev {0}".format(self.name),
- sudo=True, retry=True)
-
- ips = self.check_br_addr(self.name)
- self.execute(
- "ip addr del {0} dev {1}".format(ip,
self.name),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- sudo=True, retry=True)
- self.execute(
- "ip addr add {0} dev {1}".format(self.ip_addr,
self.name
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
),
- sudo=True, retry=True)
- self.ctns = []
-
- out = self.execute('docker network ls', sudo=True, retry=True)
- bridges = []
- bridges.append(line.split()[1])
- return bridges
-
- out = self.execute('brctl show', retry=True)
- bridges = []
- bridges.append(line.split()[0])
- return bridges
-
- out = self.execute('ovs-vsctl list-br', sudo=True, retry=True)
- return out.splitlines()
-
- return self.get_bridges_dc()
- return self.get_bridges_brctl()
- return self.get_bridges_ovs()
-
- return self.name in self.get_bridges()
-
- m = self.cmd.sudo
- m = self.cmd.execute
- return m(cmd, capture=capture, try_times=3, interval=1)
- return m(cmd, capture=capture)
-
- ips = {}
- cmd = "ip a show dev %s" % br
- elems = [e.strip() for e in line.strip().split(' ')]
- ips[4] = elems[1]
- elems = [e.strip() for e in line.strip().split(' ')]
- ips[6] = elems[1]
- return ips
-
- return "{0}/{1}".format(next(self._ip_generator),
- self.subnet.prefixlen)
-
- name = ctn.next_if_name()
- self.ctns.append(ctn)
- ip_address = None
- ipv4 = None
- ipv6 = None
- ip_address = self.next_ip_address()
- ip_address_ip = ip_address.split('/')[0]
- version = 4
- version = 6
- opt_ip = "--ip %s" % ip_address_ip
- ipv4 = ip_address
- opt_ip = "--ip6 %s" % ip_address_ip
- ipv6 = ip_address
- cmd = "docker network connect %s %s %s" % (
- opt_ip, self.name, ctn.docker_name())
- self.execute(cmd, sudo=True)
- ctn.set_addr_info(bridge=self.name, ipv4=ipv4, ipv6=ipv6,
- ifname=name)
- ip_address = self.next_ip_address()
- version = 4
- version = 6
- ctn.pipework(self, ip_address, name, version=version)
- ctn.pipework(self, '0/0', name)
- return ip_address
-
- return
- self.execute("docker network rm %s" % self.name,
- sudo=True, retry=True)
- self.execute("ip link set down dev %s" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ip link delete %s type bridge" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ovs-vsctl del-br %s" % self.name,
- sudo=True, retry=True)
-
-
- self.name = name
- self.image = image
- self.shared_volumes = []
- self.ip_addrs = []
- self.ip6_addrs = []
- self.is_running = False
- self.eths = []
- self.id = None
-
- self.cmd = Command()
- self.remove()
-
- return self.name
- return '{0}_{1}'.format(TEST_PREFIX, self.name)
-
- return self.id
- return self.docker_name()
-
- name = 'eth{0}'.format(len(self.eths) + 1)
- self.eths.append(name)
- return name
-
- def set_addr_info(self, bridge, ipv4=None, ipv6=None,
- self.ip_addrs.append((ifname, ipv4, bridge))
- self.ip6_addrs.append((ifname, ipv6, bridge))
-
- addrinfo = {}
- ip_addrs = self.ip_addrs
- ip_addrs = self.ip6_addrs
- return None
- addrinfo[addr[1]] = addr[0]
- return addrinfo
-
- m = self.cmd.sudo
- m = self.cmd.execute
- return m(cmd, capture=capture, try_times=3, interval=1)
- return m(cmd, capture=capture)
-
- return self.cmd.sudo(cmd, capture=capture, try_times=3, interval=1)
- return self.cmd.sudo(cmd, capture=capture)
-
- name = self.docker_name()
- flag = '-d' if detach else ''
- return self.dcexec('docker exec {0} {1} {2}'.format(
- flag, name, cmd), capture=capture)
-
- cmd = 'docker ps --no-trunc=true'
- cmd += ' --all=true'
- out = self.dcexec(cmd, retry=True)
- containers = []
- containers.append(line.split()[-1])
- return containers
-
- return self.docker_name() in self.get_containers(allctn=allctn)
-
- c = CmdBuffer(' ')
- c << "docker run --privileged=true"
- c << "-v {0}:{1}".format(sv[0], sv[1])
- c << "--name {0} --hostname {0} -id
{1}".format(self.docker_name()
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
,
- self.image)
- self.id = self.dcexec(str(c), retry=True)
- self.is_running = True
- self.exec_on_ctn("ip li set up dev lo")
- ipv4 = None
- ipv6 = None
- for line in self.exec_on_ctn("ip a show dev
- elems = [e.strip() for e in line.strip().split(' ')]
- ipv4 = elems[1]
- elems = [e.strip() for e in line.strip().split(' ')]
- ipv6 = elems[1]
- self.set_addr_info(bridge='docker0', ipv4=ipv4, ipv6=ipv6,
- ifname='eth0')
- return 0
-
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- LOG.warning('Call run() before pipeworking')
- return
- c = CmdBuffer(' ')
- c << "pipework {0}".format(bridge.name)
-
- c << "-i {0}".format(intf_name)
- intf_name = "eth1"
- ipv4 = None
- ipv6 = None
- ipv4 = ip_addr
- c << '-a 6'
- ipv6 = ip_addr
- c << "{0} {1}".format(self.docker_name(), ip_addr)
- self.set_addr_info(bridge=bridge.name, ipv4=ipv4, ipv6=ipv6,
- ifname=intf_name)
- self.execute(str(c), sudo=True, retry=True)
-
- cmd = "docker inspect -f '{{.State.Pid}}' %s" % self.docker_name()
- return int(self.dcexec(cmd))
- return -1
-
- interface = "eth0"
- filename = "{0}/{1}.dump".format(
- self.shared_volumes[0][1], interface)
- self.exec_on_ctn(
- "tcpdump -i {0} -w {1}".format(interface, filename),
- detach=True)
-
-
-
- WAIT_FOR_BOOT = 1
- RETRY_INTERVAL = 5
- DEFAULT_PEER_ARG = {'neigh_addr': '',
- 'passwd': None,
- 'vpn': False,
- 'flowspec': False,
- 'is_rs_client': False,
- 'is_rr_client': False,
- 'cluster_id': None,
- 'policies': None,
- 'passive': False,
- 'local_addr': '',
- 'as2': False,
- 'graceful_restart': None,
- 'local_as': None,
- 'prefix_limit': None}
- default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
- DEFAULT_ROUTE_ARG = {'prefix': None,
- 'rf': 'ipv4',
- 'attr': None,
- 'next-hop': None,
- 'as-path': None,
- 'community': None,
- 'med': None,
- 'local-pref': None,
- 'extended-community': None,
- 'matchs': None,
- 'thens': None}
- default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
-
- self.config_dir = TEST_BASE_DIR
- self.config_dir = os.path.join(self.config_dir, TEST_PREFIX)
- self.config_dir = os.path.join(self.config_dir, name)
- self.asn = asn
- self.router_id = router_id
- self.peers = {}
- self.routes = {}
- self.policies = {}
- super(BGPContainer, self).__init__(name, ctn_image_name)
- self.execute(
- 'rm -rf {0}'.format(self.config_dir), sudo=True)
- self.execute('mkdir -p {0}'.format(self.config_dir))
- self.execute('chmod 777 {0}'.format(self.config_dir))
-
- return str({'name': self.name, 'asn': self.asn,
- 'router_id': self.router_id})
-
- self.create_config()
- super(BGPContainer, self).run()
- time.sleep(w_time)
- return w_time
-
- def add_peer(self, peer, bridge='', reload_config=True, v6=False,
- peer_info = peer_info or {}
- self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
- self.peers[peer].update(peer_info)
- peer_keys = sorted(self.peers[peer].keys())
- raise Exception("argument error peer_info: %s" %
peer_info)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
-
- neigh_addr = ''
- local_addr = ''
- it = itertools.product(self.ip_addrs, peer.ip_addrs)
- it = itertools.product(self.ip6_addrs, peer.ip6_addrs)
-
- continue
- neigh_addr = you[1]
- local_addr = me[1]
- addr, mask = local_addr.split('/')
- local_addr = "{0}%{1}/{2}".format(addr, me[0],
mask)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- break
-
- raise Exception('peer {0} seems not ip reachable'.format(peer))
-
- self.peers[peer]['policies'] = {}
-
- self.peers[peer]['neigh_addr'] = neigh_addr
- self.peers[peer]['local_addr'] = local_addr
- self.create_config()
- self.reload_config()
-
- del self.peers[peer]
- self.create_config()
- self.reload_config()
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- return self.execute('cat {0}/*.log'.format(self.config_dir))
-
- route_info = route_info or {}
- self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
- self.routes[route].update(route_info)
- route_keys = sorted(self.routes[route].keys())
- raise Exception("argument error route_info: %s" %
route_info)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- self.routes[route]['prefix'] = route
- self.create_config()
- self.reload_config()
-
- def add_policy(self, policy, peer, typ, default='accept',
- self.set_default_policy(peer, typ, default)
- self.define_policy(policy)
- self.assign_policy(peer, policy, typ)
- self.create_config()
- self.reload_config()
-
- if (typ in ['in', 'out', 'import', 'export'] and
- self.peers[peer]['default-policy'] = {}
- self.peers[peer]['default-policy'][typ] = default
- raise Exception('wrong type or default')
-
- self.policies[policy['name']] = policy
-
- raise Exception('peer {0} not found'.format(peer.name))
- name = policy['name']
- raise Exception('policy {0} not found'.format(name))
- self.peers[peer]['policies'][typ] = policy
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- version = netaddr.IPNetwork(prefix).version
- addr = prefix.split('/')[0]
- ping_cmd = 'ping'
- ping_cmd = 'ping6'
- raise Exception(
- 'unsupported route family: {0}'.format(version))
- cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format(
- ping_cmd, addr)
- interval = 1
- count = 0
- res = self.exec_on_ctn(cmd)
- LOG.info(res)
- break
- time.sleep(interval)
- count += interval
- raise Exception('timeout')
- return True
-
- interval = 1
- count = 0
- state = self.get_neighbor_state(peer)
- LOG.info("%s's peer %s state: %s",
- self.router_id, peer.router_id, state)
- return
-
- time.sleep(interval)
- count += interval
- raise Exception('timeout')
-
- cmd = '/sbin/ip route add {0} via {1}'.format(network, next_hop)
- self.exec_on_ctn(cmd)
-
- cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
- self.exec_on_ctn(cmd)
-
- raise NotImplementedError()
-
- raise NotImplementedError()
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg.sh b/ryu/tests/integrated/common/install_docker_test_pkg.sh
deleted file mode 100644
index a771dfc..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-function add_docker_aptline {
- sudo apt-get update
- if ! apt-cache search docker-engine | grep docker-engine; then
- VER=`lsb_release -r`
- if echo $VER | grep 12.04; then
- REL_NAME=precise
- elif echo $VER | grep 14.04; then
- REL_NAME=trusty
- elif echo $VER | grep 15.10; then
- REL_NAME=wily
- elif echo $VER | grep 16.04; then
- REL_NAME=xenial
- else
- retrun 1
- fi
- RELEASE=ubuntu-$REL_NAME
- sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
- sudo sh -c "echo deb https://apt.dockerproject.org/repo
$RELEASE
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
main > /etc/apt/sources.list.d/docker.list"
- fi
-}
-
-init_variables
-
-if [ $APTLINE_DOCKER -eq 1 ]; then
- add_docker_aptline
-fi
-
-sudo apt-get update
-if apt-cache search docker-engine | grep docker-engine; then
- DOCKER_PKG=docker-engine
-else
- DOCKER_PKG=docker.io
-fi
-sudo apt-get install -y $DOCKER_PKG
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh b/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
deleted file mode 100644
index 44a3e10..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-set -ex
-
-function init_variables {
- APTLINE_DOCKER=0
- DIR_BASE=/tmp
-}
-
-function process_options {
- local max
- local i
- max=$#
- i=1
- while [ $i -le $max ]; do
- case "$1" in
- -a|--add-docker-aptline)
- APTLINE_DOCKER=1
- ;;
- -d|--download-dir)
- shift; ((i++))
- DIR_BASE=$1
- ;;
- esac
- shift; ((i++))
- done
-}
-
-function install_pipework {
- if ! which /usr/local/bin/pipework >/dev/null
- then
- sudo rm -rf $DIR_BASE/pipework
- git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework
- sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework
- fi
-}
-
-function install_depends_pkg {
- install_pipework
-}
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh b/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
deleted file mode 100644
index d8c3b49..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-init_variables
-
-sudo apt-get update
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/quagga.py b/ryu/tests/integrated/common/quagga.py
deleted file mode 100644
index 9b6d218..0000000
--- a/ryu/tests/integrated/common/quagga.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/quagga.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-
-import netaddr
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/quagga'
-
- def __init__(self, name, asn, router_id, ctn_image_name,
- super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
- self.zebra = zebra
- self._create_config_debian()
-
- w_time = super(QuaggaBGPContainer,
- self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- rib = []
- return self.get_global_rib_with_prefix(prefix, rf)
-
- out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
- return rib
-
- read_next = False
-
- ibgp = False
- line = line[2:]
- line = line[1:]
- ibgp = True
- continue
-
- elems = line.split()
-
- read_next = True
- prefix = elems[0]
- continue
- nexthop = elems[0]
- prefix = elems[0]
- nexthop = elems[1]
- read_next = False
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'ibgp': ibgp})
-
- return rib
-
- rib = []
-
- lines = [line.strip() for line in self.vtysh(
- 'show bgp {0} unicast {1}'.format(rf, prefix),
- config=False).split('\n')]
-
- return rib
-
- lines = lines[2:]
-
- lines.pop(0) # another useless line
- elif lines[0].startswith('Advertised to non peer-group
- lines = lines[2:] # other useless lines
- raise Exception('unknown output format {0}'.format(lines))
-
- aspath = []
- aspath = [int(asn) for asn in lines[0].split()]
-
- nexthop = lines[1].split()[0].strip()
- info = [s.strip(',') for s in lines[2].split()]
- attrs = []
- med = info[info.index('metric') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
- 'metric': int(med)})
- localpref = info[info.index('localpref') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
- 'value': int(localpref)})
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'aspath': aspath, 'attrs': attrs})
-
- return rib
-
- raise Exception('not found peer {0}'.format(peer.router_id))
-
- neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
-
- info = [l.strip() for l in self.vtysh(
- 'show bgp neighbors {0}'.format(neigh_addr),
- config=False).split('\n')]
-
- raise Exception('unknown format')
-
- idx1 = info[0].index('BGP neighbor is ')
- idx2 = info[0].index(',')
- n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
- idx1 = info[2].index('= ')
- state = info[2][idx1 + len('= '):]
- return base.BGP_FSM_IDLE
- return base.BGP_FSM_ACTIVE
- return base.BGP_FSM_ESTABLISHED
- return state
-
- raise Exception('not found peer {0}'.format(peer.router_id))
-
- self.vtysh('clear ip bgp * soft', config=False)
-
- zebra = 'no'
- self._create_config_bgp()
- zebra = 'yes'
- self._create_config_zebra()
- self._create_config_daemons(zebra)
-
- c = base.CmdBuffer()
- c << 'vtysh_enable=yes'
- c << 'zebra_options=" --daemon -A 127.0.0.1"'
- c << 'bgpd_options=" --daemon -A 127.0.0.1"'
- c << 'ospfd_options=" --daemon -A 127.0.0.1"'
- c << 'ospf6d_options=" --daemon -A ::1"'
- c << 'ripd_options=" --daemon -A 127.0.0.1"'
- c << 'ripngd_options=" --daemon -A ::1"'
- c << 'isisd_options=" --daemon -A 127.0.0.1"'
- c << 'babeld_options=" --daemon -A 127.0.0.1"'
- c << 'watchquagga_enable=yes'
- c << 'watchquagga_options=(--daemon)'
- with open('{0}/debian.conf'.format(self.config_dir), 'w') as
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'zebra=%s' % zebra
- c << 'bgpd=yes'
- c << 'ospfd=no'
- c << 'ospf6d=no'
- c << 'ripd=no'
- c << 'ripngd=no'
- c << 'isisd=no'
- c << 'babeld=no'
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
-
- c = base.CmdBuffer()
- c << 'hostname bgpd'
- c << 'password zebra'
- c << 'router bgp {0}'.format(self.asn)
- c << 'bgp router-id {0}'.format(self.router_id)
- if any(info['graceful_restart'] for info in
- c << 'bgp graceful-restart'
-
- version = 4
- version = netaddr.IPNetwork(info['neigh_addr']).version
- n_addr = info['neigh_addr'].split('/')[0]
- c << 'no bgp default ipv4-unicast'
-
- c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
- c << 'neighbor {0} route-server-client'.format(n_addr)
- c << 'neighbor {0} route-map {1} {2}'.format(n_addr, p['name'],
- typ)
- c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
- c << 'neighbor {0} passive'.format(n_addr)
- c << 'address-family ipv6 unicast'
- c << 'neighbor {0} activate'.format(n_addr)
- c << 'exit-address-family'
-
- c << 'network {0}'.format(route['prefix'])
- c << 'address-family ipv6 unicast'
- c << 'network {0}'.format(route['prefix'])
- c << 'exit-address-family'
- raise Exception(
{0}'.format(route['rf']))
-
- c << 'address-family ipv6 unicast'
- c << 'redistribute connected'
- c << 'exit-address-family'
- c << 'redistribute connected'
-
- c << 'access-list {0} {1} {2}'.format(name, policy['type'],
- policy['match'])
- c << 'route-map {0} permit 10'.format(name)
- c << 'match ip address {0}'.format(name)
- c << 'set metric {0}'.format(policy['med'])
-
- c << 'debug bgp as4'
- c << 'debug bgp fsm'
- c << 'debug bgp updates'
- c << 'debug bgp events'
- c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
-
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'hostname zebra'
- c << 'password zebra'
- c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
- c << 'debug zebra packet'
- c << 'debug zebra kernel'
- c << 'debug zebra rib'
- c << ''
-
- with open('{0}/zebra.conf'.format(self.config_dir), 'w') as
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- cmd = [cmd]
- cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
- return self.exec_on_ctn(
- "vtysh -d bgpd -c 'en' -c 'conf t' -c "
- "'router bgp {0}' {1}".format(self.asn, cmd),
- capture=True)
- return self.exec_on_ctn("vtysh -d bgpd {0}".format(cmd),
- capture=True)
-
- daemon = []
- daemon.append('bgpd')
- daemon.append('zebra')
- cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
- self.exec_on_ctn(cmd, capture=True)
-
-
- def __init__(self, name, config, ctn_image_name,
- asn = None
- router_id = None
- line = line.strip()
- asn = int(line[len('router bgp'):].strip())
- router_id = line[len('bgp router-id'):].strip()
- raise Exception('asn not in quagga config')
- raise Exception('router-id not in quagga config')
- self.config = config
- super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name, zebra)
-
- with open(os.path.join(self.config_dir, 'bgpd.conf'), 'w')
- LOG.info("[%s's new config]", self.name)
- LOG.info(self.config)
- f.writelines(self.config)
diff --git a/ryu/tests/integrated/common/ryubgp.py b/ryu/tests/integrated/common/ryubgp.py
deleted file mode 100644
index 8fe16f4..0000000
--- a/ryu/tests/integrated/common/ryubgp.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-import time
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/ryu'
-
- super(RyuBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
- self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
- self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
- self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
-
- c = base.CmdBuffer()
- c << '[DEFAULT]'
- c << 'verbose=True'
- c << 'log_file=/etc/ryu/manager.log'
- LOG.info("[%s's new config]" % self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'import os'
- c << ''
- c << 'BGP = {'
- c << " 'local_as': %s," % str(self.asn)
- c << " 'router_id': '%s'," % self.router_id
- c << " 'neighbors': ["
- c << " {"
- n_addr = info['neigh_addr'].split('/')[0]
- c << " 'address': '%s'," % n_addr
- c << " 'remote_as': %s," % str(peer.asn)
- c << " 'enable_ipv4': True,"
- c << " 'enable_ipv6': True,"
- c << " 'enable_vpnv4': True,"
- c << " 'enable_vpnv6': True,"
- c << ' },'
- c << ' ],'
- c << " 'routes': ["
- c << " {"
- c << " 'prefix': '%s'," % route['prefix']
- c << " },"
- c << " ],"
- c << "}"
- log_conf = """LOGGING = {
-
- # We use python logging package for logging.
- 'version': 1,
- 'disable_existing_loggers': False,
-
- 'formatters': {
- 'verbose': {
- 'format': '%(levelname)s %(asctime)s %(module)s ' +
- '[%(process)d %(thread)d] %(message)s'
- },
- 'simple': {
- 'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
- '%(message)s'
- },
- 'stats': {
- 'format': '%(message)s'
- },
- },
-
- 'handlers': {
- # Outputs log to console.
- 'console': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'simple'
- },
- 'console_stats': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'stats'
- },
- # Rotates log file when its size reaches 10MB.
- 'log_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'bgpspeaker.log'),
- 'maxBytes': '10000000',
- 'formatter': 'verbose'
- },
- 'stats_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'statistics_bgps.log'),
- 'maxBytes': '10000000',
- 'formatter': 'stats'
- },
- },
-
- # Fine-grained control of logging per instance.
- 'loggers': {
- 'bgpspeaker': {
- 'handlers': ['console', 'log_file'],
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'stats': {
- 'handlers': ['stats_file', 'console_stats'],
- 'level': 'INFO',
- 'propagate': False,
- 'formatter': 'stats',
- },
- },
-
- # Root loggers.
- 'root': {
- 'handlers': ['console', 'log_file'],
- 'level': 'DEBUG',
- 'propagate': True,
- },
-}"""
- c << log_conf
- with open(os.path.join(self.config_dir, 'bgp_conf.py'), 'w') as
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- self._create_config_ryu()
- self._create_config_ryu_bgp()
-
- results = self.exec_on_ctn('ps ax')
- running = False
- running = True
- return running
-
- return True
- result = False
- try_times = 3
- try_times = 1
- cmd = "ryu-manager --verbose "
- cmd += "--config-file %s " % self.SHARED_RYU_CONF
- cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
- cmd += "ryu.services.protocols.bgp.application"
- self.exec_on_ctn(cmd, detach=True)
- result = True
- break
- time.sleep(1)
- return result
-
- return True
- result = False
- try_times = 3
- try_times = 1
- cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
- self.exec_on_ctn(cmd)
- result = True
- break
- time.sleep(1)
- return result
-
- w_time = super(RyuBGPContainer,
- self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- self.stop_ryubgp(retry=True)
- self.start_ryubgp(retry=True)
diff --git a/tests/integrated/bgp/base.py b/tests/integrated/bgp/base.py
index 26fa396..2f210de 100644
--- a/tests/integrated/bgp/base.py
+++ b/tests/integrated/bgp/base.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest
-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga
LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/base_ip6.py b/tests/integrated/bgp/base_ip6.py
index be26faf..d867920 100644
--- a/tests/integrated/bgp/base_ip6.py
+++ b/tests/integrated/bgp/base_ip6.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest
-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga
LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/test_basic.py b/tests/integrated/bgp/test_basic.py
index 5817d44..d1eda39 100644
--- a/tests/integrated/bgp/test_basic.py
+++ b/tests/integrated/bgp/test_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import time
-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base
from . import base
diff --git a/tests/integrated/bgp/test_ip6_basic.py b/tests/integrated/bgp/test_ip6_basic.py
index 40461a5..911a0b5 100644
--- a/tests/integrated/bgp/test_ip6_basic.py
+++ b/tests/integrated/bgp/test_ip6_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import time
-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base
from . import base_ip6 as base
--
2.7.4
--
fumihiko kakuma <***@valinux.co.jp>
Takashi YAMAMOTO
2017-07-13 04:15:46 UTC
Permalink
Post by FUJITA Tomonori
On Thu, 13 Jul 2017 09:15:25 +0900
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Hi Yamamoto-San,
On Wed, 12 Jul 2017 23:43:25 +0900
Post by Takashi YAMAMOTO
this breaks openstack neutron_dynamic_routing, doesn't it?
Thank you for the advice. I am aware of that.
As soon as the Ryu version containing this commit is released,
I will push a patch to fix the breakage in neutron-dynamic-routing.
Is it difficult to avoid breaking it?
E.g., make it try-import the new and old locations.
This change only breaks the scenario tests in neutron-dynamic-routing,
and they are currently a non-voting job.
Don't you like it?
I don't like any breaking changes.
Import errors in tempest plugins are often a disaster.
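
For reference, a minimal sketch of the "try-import the new and old locations" idea suggested above. This is illustrative only and not part of the posted patch; it shows how a consumer such as the neutron-dynamic-routing tests could keep working across Ryu releases by preferring the new module path and falling back to the old one:

    # Compatibility shim (illustrative sketch, not from the patch):
    # prefer the new path introduced by this patch, fall back to the
    # old path on Ryu releases that predate the move.
    try:
        from ryu.lib.docker import docker_base as ctn_base
        from ryu.lib.docker import quagga
        from ryu.lib.docker import ryubgp
    except ImportError:
        from ryu.tests.integrated.common import docker_base as ctn_base
        from ryu.tests.integrated.common import quagga
        from ryu.tests.integrated.common import ryubgp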
Post by FUJITA Tomonori
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Thanks,
Kakuma
Post by Takashi YAMAMOTO
On Mon, Jul 10, 2017 at 12:51 PM, fumihiko kakuma <
Post by fumihiko kakuma
This patch requires the v2 patch of Iwase-San.
---
.travis.yml | 2 +-
ryu/lib/docker/__init__.py | 0
ryu/lib/docker/docker_base.py | 801
+++++++++++++++++++++
ryu/lib/docker/install_docker_test_pkg.sh | 43 ++
ryu/lib/docker/install_docker_test_pkg_common.sh | 39 +
.../docker/install_docker_test_pkg_for_travis.sh | 12 +
ryu/lib/docker/quagga.py | 332 +++++++++
ryu/lib/docker/ryubgp.py | 212 ++++++
ryu/tests/integrated/common/__init__.py | 0
ryu/tests/integrated/common/docker_base.py | 801
---------------------
.../integrated/common/install_docker_test_pkg.sh | 43 --
.../common/install_docker_test_pkg_common.sh | 39 -
.../common/install_docker_test_pkg_for_travis.sh | 12 -
ryu/tests/integrated/common/quagga.py | 332
---------
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
ryu/tests/integrated/common/ryubgp.py | 212 ------
tests/integrated/bgp/base.py | 6 +-
tests/integrated/bgp/base_ip6.py | 6 +-
tests/integrated/bgp/test_basic.py | 2 +-
tests/integrated/bgp/test_ip6_basic.py | 2 +-
19 files changed, 1448 insertions(+), 1448 deletions(-)
create mode 100644 ryu/lib/docker/__init__.py
create mode 100644 ryu/lib/docker/docker_base.py
create mode 100644 ryu/lib/docker/install_docker_test_pkg.sh
create mode 100644 ryu/lib/docker/install_docker_
test_pkg_common.sh
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
create mode 100644 ryu/lib/docker/install_docker_
test_pkg_for_travis.sh
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
create mode 100644 ryu/lib/docker/quagga.py
create mode 100644 ryu/lib/docker/ryubgp.py
delete mode 100644 ryu/tests/integrated/common/__init__.py
delete mode 100644 ryu/tests/integrated/common/docker_base.py
delete mode 100644 ryu/tests/integrated/common/
install_docker_test_pkg.sh
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
delete mode 100644 ryu/tests/integrated/common/
install_docker_test_pkg_
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
common.sh
delete mode 100644 ryu/tests/integrated/common/
install_docker_test_pkg_for_travis.sh
delete mode 100644 ryu/tests/integrated/common/quagga.py
delete mode 100644 ryu/tests/integrated/common/ryubgp.py
diff --git a/.travis.yml b/.travis.yml
index 9e5474a..cd35aac 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,7 +16,7 @@ sudo: required # Required to enable Docker
service
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- pip install tox coveralls
- - bash ryu/tests/integrated/common/install_docker_test_pkg_for_
travis.sh
+ - bash ryu/lib/docker/install_docker_test_pkg_for_travis.sh
- NOSE_VERBOSE=0 tox -e $TOX_ENV
diff --git a/ryu/lib/docker/__init__.py
b/ryu/lib/docker/__init__.py
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
new file mode 100644
index 0000000..e69de29
diff --git a/ryu/lib/docker/docker_base.py
b/ryu/lib/docker/docker_base.py
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
new file mode 100644
index 0000000..1ae2cc2
--- /dev/null
+++ b/ryu/lib/docker/docker_base.py
@@ -0,0 +1,801 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/base.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
software
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+# distributed under the License is distributed on an "AS IS"
BASIS,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions
and
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import itertools
+import logging
+import os
+import subprocess
+import time
+
+import netaddr
+import six
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_TEST_PREFIX = ''
+DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
+TEST_PREFIX = DEFAULT_TEST_PREFIX
+TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
+
+BGP_FSM_IDLE = 'BGP_FSM_IDLE'
+BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
+BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
+
+BGP_ATTR_TYPE_ORIGIN = 1
+BGP_ATTR_TYPE_AS_PATH = 2
+BGP_ATTR_TYPE_NEXT_HOP = 3
+BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
+BGP_ATTR_TYPE_LOCAL_PREF = 5
+BGP_ATTR_TYPE_COMMUNITIES = 8
+BGP_ATTR_TYPE_ORIGINATOR_ID = 9
+BGP_ATTR_TYPE_CLUSTER_LIST = 10
+BGP_ATTR_TYPE_MP_REACH_NLRI = 14
+BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
+
+BRIDGE_TYPE_DOCKER = 'docker'
+BRIDGE_TYPE_BRCTL = 'brctl'
+BRIDGE_TYPE_OVS = 'ovs'
+
+
+ super(CommandError, self).__init__()
+ self.out = out
+
+
+ e = RuntimeError()
+ r = f()
+ time.sleep(s)
+ return r
+ raise e
+
+
+ super(CmdBuffer, self).__init__()
+ self.delim = delim
+
+ self.append(value)
+
+ return self.delim.join(self)
+
+
+
+ def __new__(cls, stdout, stderr, command, returncode,
+ stdout = stdout or ''
+ obj = super(CommandOut, cls).__new__(cls, stdout,
**kwargs)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ obj.stderr = stderr or ''
+ obj.command = command
+ obj.returncode = returncode
+ return obj
+
+
+
+ """Execute a command using subprocess.Popen()
+ - out: stdout from subprocess.Popen()
+ out has some attributes.
+ out.returncode: returncode of subprocess.Popen()
+ out.stderr: stderr from subprocess.Popen()
+ """
+ p_stdout = subprocess.PIPE
+ p_stderr = subprocess.PIPE
+ p_stdout = None
+ p_stderr = None
+ pop = subprocess.Popen(cmd, shell=True,
executable=executable,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ stdout=p_stdout,
+ stderr=p_stderr)
+ __stdout, __stderr = pop.communicate()
+ _stdout = six.text_type(__stdout, 'utf-8')
+ _stderr = six.text_type(__stderr, 'utf-8')
+ out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
+ return out
+
+ out = None
+ out = self._execute(cmd, capture=capture)
+ LOG.info(out.command)
+ return out
+ LOG.error("stdout: %s", out)
+ LOG.error("stderr: %s", out.stderr)
+ break
+ time.sleep(interval)
+ raise CommandError(out)
+
+ cmd = 'sudo %s' % cmd
+ return self.execute(cmd, capture=capture,
+ try_times=try_times,
interval=interval)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+
+ self.baseimage = baseimage
+ self.cmd = Command()
+
+ out = self.cmd.sudo('sudo docker images')
+ images = []
+ images.append(line.split()[0])
+ return images
+
+ return name in self.get_images()
+
+ self.cmd.sudo(
+ "docker build -t {0} {1}".format(tagname,
dockerfile_dir),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ try_times=3)
+
+ return tagname
+ self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
+
+ def create_quagga(self, tagname='quagga', image=None,
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ pkges = ' '.join([
+ 'telnet',
+ 'tcpdump',
+ 'quagga',
+ ])
+ use_image = image
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'RUN apt-get update'
+ c << 'RUN apt-get install -qy --no-install-recommends %s'
%
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
pkges
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ c << 'CMD /usr/lib/quagga/bgpd'
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c),
workdir))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.build(tagname, workdir)
+ return tagname
+
+ def create_ryu(self, tagname='ryu', image=None,
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ workdir_ctn = '/root/osrg/ryu'
+ pkges = ' '.join([
+ 'tcpdump',
+ 'iproute2',
+ ])
+ use_image = image
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'ADD ryu %s' % workdir_ctn
+ install = ' '.join([
+ 'RUN apt-get update',
+ '&& apt-get install -qy --no-install-recommends %s' %
pkges,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ '&& cd %s' % workdir_ctn,
+ # Note: Clean previous builds, because "python
setup.py
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
install"
+ # might fail if the current directory contains the
symlink to
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ # Docker host file systems.
+ '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
+ '&& pip install -r tools/pip-requires -r
tools/optional-requires',
+ '&& python setup.py install',
+ ])
+ c << install
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c),
workdir))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.cmd.execute('cp -r ../ryu %s/' % workdir)
+ self.build(tagname, workdir)
+ return tagname
+
+
+ def __init__(self, name, subnet='', start_ip=None,
end_ip=None,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ with_ip=True, self_ip=False,
+ fixed_ip=None, reuse=False,
+ """Manage a bridge
+ - name: bridge name
+ - subnet: network cider to be used in this bridge
+ - start_ip: start address of an ip to be used in the
subnet
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ - end_ip: end address of an ip to be used in the
subnet
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ - with_ip: specify if assign automatically an ip
address
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ - self_ip: specify if assign an ip address for the
bridge
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ - fixed_ip: an ip address to be assigned to the bridge
+ - reuse: specify if use an existing bridge
+ - br_type: One either in a 'docker', 'brctl' or 'ovs'
+ """
+ self.cmd = Command()
+ self.name = name
+ if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
+ raise Exception("argument error br_type: %s" %
br_type)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.br_type = br_type
+ self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
+ self.name = '{0}_{1}'.format(TEST_PREFIX, name)
+ self.with_ip = with_ip
+ self.subnet = netaddr.IPNetwork(subnet)
+ self.start_ip = start_ip
+ self.start_ip = netaddr.IPAddress(self.subnet.
first)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.end_ip = end_ip
+ self.end_ip = netaddr.IPAddress(self.subnet.last)
+
+ for host in netaddr.IPRange(self.start_ip,
+ yield host
+ self._ip_generator = _ip_gen()
+ # throw away first network address
+ self.next_ip_address()
+
+ self.self_ip = self_ip
+ self.ip_addr = fixed_ip
+ self.ip_addr = self.next_ip_address()
+ gw = "--gateway %s" %
self.ip_addr.split('/')[0]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ v6 = ''
+ v6 = '--ipv6'
+ cmd = ("docker network create --driver bridge
%s "
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ "%s --subnet %s %s" % (v6, gw, subnet,
self.name))
+ cmd = "ip link add {0} type bridge".format(
self.name)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ cmd = "ovs-vsctl add-br {0}".format(self.name
)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ raise ValueError('Unsupported br_type: %s' %
self.br_type)
+ self.delete()
+ self.execute(cmd, sudo=True, retry=True)
+ try_several_times(f)
+ self.execute("ip link set up dev {0}".format(
self.name),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ sudo=True, retry=True)
+
+ ips = self.check_br_addr(self.name)
+ self.execute(
+ "ip addr del {0} dev {1}".format(ip,
self.name),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ sudo=True, retry=True)
+ self.execute(
+ "ip addr add {0} dev {1}".format(self.ip_addr,
self.name
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
),
+ sudo=True, retry=True)
+ self.ctns = []
+
+ out = self.execute('docker network ls', sudo=True,
retry=True)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ bridges = []
+ bridges.append(line.split()[1])
+ return bridges
+
+ out = self.execute('brctl show', retry=True)
+ bridges = []
+ bridges.append(line.split()[0])
+ return bridges
+
+ out = self.execute('ovs-vsctl list-br', sudo=True,
retry=True)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ return out.splitlines()
+
+ return self.get_bridges_dc()
+ return self.get_bridges_brctl()
+ return self.get_bridges_ovs()
+
+ return self.name in self.get_bridges()
+
+ m = self.cmd.sudo
+ m = self.cmd.execute
+ return m(cmd, capture=capture, try_times=3,
interval=1)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ return m(cmd, capture=capture)
+
+ ips = {}
+ cmd = "ip a show dev %s" % br
+ elems = [e.strip() for e in line.strip().split('
')]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ips[4] = elems[1]
+ elems = [e.strip() for e in line.strip().split('
')]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ips[6] = elems[1]
+ return ips
+
+ return "{0}/{1}".format(next(self._ip_generator),
+ self.subnet.prefixlen)
+
+ name = ctn.next_if_name()
+ self.ctns.append(ctn)
+ ip_address = None
+ ipv4 = None
+ ipv6 = None
+ ip_address = self.next_ip_address()
+ ip_address_ip = ip_address.split('/')[0]
+ version = 4
+ version = 6
+ opt_ip = "--ip %s" % ip_address_ip
+ ipv4 = ip_address
+ opt_ip = "--ip6 %s" % ip_address_ip
+ ipv6 = ip_address
+ cmd = "docker network connect %s %s %s" % (
+ opt_ip, self.name, ctn.docker_name())
+ self.execute(cmd, sudo=True)
+ ctn.set_addr_info(bridge=self.name, ipv4=ipv4,
ipv6=ipv6,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ifname=name)
+ ip_address = self.next_ip_address()
+ version = 4
+ version = 6
+ ctn.pipework(self, ip_address, name,
version=version)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ctn.pipework(self, '0/0', name)
+ return ip_address
+
+ return
+ self.execute("docker network rm %s" % self.name,
+ sudo=True, retry=True)
+ self.execute("ip link set down dev %s" % self.name,
+ sudo=True, retry=True)
+ self.execute(
+ "ip link delete %s type bridge" % self.name,
+ sudo=True, retry=True)
+ self.execute(
+ "ovs-vsctl del-br %s" % self.name,
+ sudo=True, retry=True)
+
+
+ self.name = name
+ self.image = image
+ self.shared_volumes = []
+ self.ip_addrs = []
+ self.ip6_addrs = []
+ self.is_running = False
+ self.eths = []
+ self.id = None
+
+ self.cmd = Command()
+ self.remove()
+
+ return self.name
+ return '{0}_{1}'.format(TEST_PREFIX, self.name)
+
+ return self.id
+ return self.docker_name()
+
+ name = 'eth{0}'.format(len(self.eths) + 1)
+ self.eths.append(name)
+ return name
+
+ def set_addr_info(self, bridge, ipv4=None, ipv6=None,
+ self.ip_addrs.append((ifname, ipv4, bridge))
+ self.ip6_addrs.append((ifname, ipv6, bridge))
+
+ addrinfo = {}
+ ip_addrs = self.ip_addrs
+ ip_addrs = self.ip6_addrs
+ return None
+ addrinfo[addr[1]] = addr[0]
+ return addrinfo
+
+ m = self.cmd.sudo
+ m = self.cmd.execute
+ return m(cmd, capture=capture, try_times=3,
interval=1)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ return m(cmd, capture=capture)
+
+ return self.cmd.sudo(cmd, capture=capture,
try_times=3,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
interval=1)
+ return self.cmd.sudo(cmd, capture=capture)
+
+ name = self.docker_name()
+ flag = '-d' if detach else ''
+ return self.dcexec('docker exec {0} {1} {2}'.format(
+ flag, name, cmd), capture=capture)
+
+ cmd = 'docker ps --no-trunc=true'
+ cmd += ' --all=true'
+ out = self.dcexec(cmd, retry=True)
+ containers = []
+ containers.append(line.split()[-1])
+ return containers
+
+ return self.docker_name() in self.get_containers(allctn=
allctn)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ c = CmdBuffer(' ')
+ c << "docker run --privileged=true"
+ c << "-v {0}:{1}".format(sv[0], sv[1])
+ c << "--name {0} --hostname {0} -id
{1}".format(self.docker_name()
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
,
+
self.image)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.id = self.dcexec(str(c), retry=True)
+ self.is_running = True
+ self.exec_on_ctn("ip li set up dev lo")
+ ipv4 = None
+ ipv6 = None
+ for line in self.exec_on_ctn("ip a show dev
+ elems = [e.strip() for e in line.strip().split('
')]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ipv4 = elems[1]
+ elems = [e.strip() for e in line.strip().split('
')]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ipv6 = elems[1]
+ self.set_addr_info(bridge='docker0', ipv4=ipv4,
ipv6=ipv6,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ifname='eth0')
+ return 0
+
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker stop -t 0 %s' % ctn_id,
retry=True)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.is_running = False
+ return out
+
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
+ self.is_running = False
+ return out
+
+ LOG.warning('Call run() before pipeworking')
+ return
+ c = CmdBuffer(' ')
+ c << "pipework {0}".format(bridge.name)
+
+ c << "-i {0}".format(intf_name)
+ intf_name = "eth1"
+ ipv4 = None
+ ipv6 = None
+ ipv4 = ip_addr
+ c << '-a 6'
+ ipv6 = ip_addr
+ c << "{0} {1}".format(self.docker_name(), ip_addr)
+ self.set_addr_info(bridge=bridge.name, ipv4=ipv4,
ipv6=ipv6,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ifname=intf_name)
+ self.execute(str(c), sudo=True, retry=True)
+
+ cmd = "docker inspect -f '{{.State.Pid}}' %s" %
self.docker_name()
+ return int(self.dcexec(cmd))
+ return -1
+
+ interface = "eth0"
+ filename = "{0}/{1}.dump".format(
+ self.shared_volumes[0][1], interface)
+ self.exec_on_ctn(
+ "tcpdump -i {0} -w {1}".format(interface, filename),
+ detach=True)
+
+
+
+ WAIT_FOR_BOOT = 1
+ RETRY_INTERVAL = 5
+ DEFAULT_PEER_ARG = {'neigh_addr': '',
+ 'passwd': None,
+ 'vpn': False,
+ 'flowspec': False,
+ 'is_rs_client': False,
+ 'is_rr_client': False,
+ 'cluster_id': None,
+ 'policies': None,
+ 'passive': False,
+ 'local_addr': '',
+ 'as2': False,
+ 'graceful_restart': None,
+ 'local_as': None,
+ 'prefix_limit': None}
+ default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
+ DEFAULT_ROUTE_ARG = {'prefix': None,
+ 'rf': 'ipv4',
+ 'attr': None,
+ 'next-hop': None,
+ 'as-path': None,
+ 'community': None,
+ 'med': None,
+ 'local-pref': None,
+ 'extended-community': None,
+ 'matchs': None,
+ 'thens': None}
+ default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
+
+ self.config_dir = TEST_BASE_DIR
+ self.config_dir = os.path.join(self.config_dir,
TEST_PREFIX)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.config_dir = os.path.join(self.config_dir, name)
+ self.asn = asn
+ self.router_id = router_id
+ self.peers = {}
+ self.routes = {}
+ self.policies = {}
+ super(BGPContainer, self).__init__(name, ctn_image_name)
+ self.execute(
+ 'rm -rf {0}'.format(self.config_dir), sudo=True)
+ self.execute('mkdir -p {0}'.format(self.config_dir))
+ self.execute('chmod 777 {0}'.format(self.config_dir))
+
+ return str({'name': self.name, 'asn': self.asn,
+ 'router_id': self.router_id})
+
+ self.create_config()
+ super(BGPContainer, self).run()
+ time.sleep(w_time)
+ return w_time
+
+ def add_peer(self, peer, bridge='', reload_config=True,
v6=False,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ peer_info = peer_info or {}
+ self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
+ self.peers[peer].update(peer_info)
+ peer_keys = sorted(self.peers[peer].keys())
+ raise Exception("argument error peer_info: %s" %
peer_info)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ neigh_addr = ''
+ local_addr = ''
+ it = itertools.product(self.ip_addrs, peer.ip_addrs)
+ it = itertools.product(self.ip6_addrs,
peer.ip6_addrs)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ continue
+ neigh_addr = you[1]
+ local_addr = me[1]
+ addr, mask = local_addr.split('/')
+ local_addr = "{0}%{1}/{2}".format(addr, me[0],
mask)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ break
+
+ raise Exception('peer {0} seems not ip
reachable'.format(peer))
+
+ self.peers[peer]['policies'] = {}
+
+ self.peers[peer]['neigh_addr'] = neigh_addr
+ self.peers[peer]['local_addr'] = local_addr
+ self.create_config()
+ self.reload_config()
+
+ del self.peers[peer]
+ self.create_config()
+ self.reload_config()
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ return self.execute('cat {0}/*.log'.format(self.config_
dir))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ def add_route(self, route, reload_config=True,
+ route_info = route_info or {}
+ self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
+ self.routes[route].update(route_info)
+ route_keys = sorted(self.routes[route].keys())
+ raise Exception("argument error route_info: %s" %
route_info)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.routes[route]['prefix'] = route
+ self.create_config()
+ self.reload_config()
+
+ def add_policy(self, policy, peer, typ, default='accept',
+ self.set_default_policy(peer, typ, default)
+ self.define_policy(policy)
+ self.assign_policy(peer, policy, typ)
+ self.create_config()
+ self.reload_config()
+
+ if (typ in ['in', 'out', 'import', 'export'] and
+ self.peers[peer]['default-policy'] = {}
+ self.peers[peer]['default-policy'][typ] = default
+ raise Exception('wrong type or default')
+
+ self.policies[policy['name']] = policy
+
+ raise Exception('peer {0} not found'.format(peer.name
))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ name = policy['name']
+ raise Exception('policy {0} not found'.format(name))
+ self.peers[peer]['policies'][typ] = policy
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ version = netaddr.IPNetwork(prefix).version
+ addr = prefix.split('/')[0]
+ ping_cmd = 'ping'
+ ping_cmd = 'ping6'
+ raise Exception(
+ 'unsupported route family: {0}'.format(version))
+ cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs
echo"'.format(
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ping_cmd, addr)
+ interval = 1
+ count = 0
+ res = self.exec_on_ctn(cmd)
+ LOG.info(res)
+ break
+ time.sleep(interval)
+ count += interval
+ raise Exception('timeout')
+ return True
+
+ interval = 1
+ count = 0
+ state = self.get_neighbor_state(peer)
+ LOG.info("%s's peer %s state: %s",
+ self.router_id, peer.router_id, state)
+ return
+
+ time.sleep(interval)
+ count += interval
+ raise Exception('timeout')
+
+ cmd = '/sbin/ip route add {0} via {1}'.format(network,
next_hop)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.exec_on_ctn(cmd)
+
+ cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
+ self.exec_on_ctn(cmd)
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
diff --git a/ryu/lib/docker/install_docker_test_pkg.sh
b/ryu/lib/docker/install_docker_test_pkg.sh
new file mode 100644
index 0000000..a771dfc
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+function add_docker_aptline {
+ sudo apt-get update
+ if ! apt-cache search docker-engine | grep docker-engine; then
+ VER=`lsb_release -r`
+ if echo $VER | grep 12.04; then
+ REL_NAME=precise
+ elif echo $VER | grep 14.04; then
+ REL_NAME=trusty
+ elif echo $VER | grep 15.10; then
+ REL_NAME=wily
+ elif echo $VER | grep 16.04; then
+ REL_NAME=xenial
+ else
+ retrun 1
+ fi
+ RELEASE=ubuntu-$REL_NAME
+ sudo apt-key adv --keyserver
hkp://p80.pool.sks-keyservers.
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
net:80
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
--recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+ sudo sh -c "echo deb https://apt.dockerproject.org/repo
$RELEASE
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
main > /etc/apt/sources.list.d/docker.list"
+ fi
+}
+
+init_variables
+
+if [ $APTLINE_DOCKER -eq 1 ]; then
+ add_docker_aptline
+fi
+
+sudo apt-get update
+if apt-cache search docker-engine | grep docker-engine; then
+ DOCKER_PKG=docker-engine
+else
+ DOCKER_PKG=docker.io
+fi
+sudo apt-get install -y $DOCKER_PKG
+install_depends_pkg
diff --git a/ryu/lib/docker/install_docker_test_pkg_common.sh
b/ryu/lib/docker/install_docker_test_pkg_common.sh
new file mode 100644
index 0000000..44a3e10
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg_common.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+set -ex
+
+function init_variables {
+ APTLINE_DOCKER=0
+ DIR_BASE=/tmp
+}
+
+function process_options {
+ local max
+ local i
+ max=$#
+ i=1
+ while [ $i -le $max ]; do
+ case "$1" in
+ -a|--add-docker-aptline)
+ APTLINE_DOCKER=1
+ ;;
+ -d|--download-dir)
+ shift; ((i++))
+ DIR_BASE=$1
+ ;;
+ esac
+ shift; ((i++))
+ done
+}
+
+function install_pipework {
+ if ! which /usr/local/bin/pipework >/dev/null
+ then
+ sudo rm -rf $DIR_BASE/pipework
+ git clone https://github.com/jpetazzo/pipework.git
$DIR_BASE/pipework
+ sudo install -m 0755 $DIR_BASE/pipework/pipework
/usr/local/bin/pipework
+ fi
+}
+
+function install_depends_pkg {
+ install_pipework
+}
diff --git a/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
b/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
new file mode 100644
index 0000000..d8c3b49
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+init_variables
+
+sudo apt-get update
+install_depends_pkg
diff --git a/ryu/lib/docker/quagga.py b/ryu/lib/docker/quagga.py
new file mode 100644
index 0000000..9b6d218
--- /dev/null
+++ b/ryu/lib/docker/quagga.py
@@ -0,0 +1,332 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/quagga.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+
+import netaddr
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
+
+ WAIT_FOR_BOOT = 1
+ SHARED_VOLUME = '/etc/quagga'
+
+ def __init__(self, name, asn, router_id, ctn_image_name,
+        super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
+                                                 ctn_image_name)
+        self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
+ self.zebra = zebra
+ self._create_config_debian()
+
+ w_time = super(QuaggaBGPContainer,
+                       self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
+ return w_time
+
+ rib = []
+ return self.get_global_rib_with_prefix(prefix, rf)
+
+        out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
+ return rib
+
+ read_next = False
+
+ ibgp = False
+ line = line[2:]
+ line = line[1:]
+ ibgp = True
+ continue
+
+ elems = line.split()
+
+ read_next = True
+ prefix = elems[0]
+ continue
+ nexthop = elems[0]
+ prefix = elems[0]
+ nexthop = elems[1]
+ read_next = False
+
+ rib.append({'prefix': prefix, 'nexthop': nexthop,
+ 'ibgp': ibgp})
+
+ return rib
+
+ rib = []
+
+ lines = [line.strip() for line in self.vtysh(
+ 'show bgp {0} unicast {1}'.format(rf, prefix),
+ config=False).split('\n')]
+
+ return rib
+
+ lines = lines[2:]
+
+ lines.pop(0) # another useless line
+ elif lines[0].startswith('Advertised to non peer-group
+ lines = lines[2:] # other useless lines
+            raise Exception('unknown output format {0}'.format(lines))
+
+ aspath = []
+ aspath = [int(asn) for asn in lines[0].split()]
+
+ nexthop = lines[1].split()[0].strip()
+ info = [s.strip(',') for s in lines[2].split()]
+ attrs = []
+ med = info[info.index('metric') + 1]
+            attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
+ 'metric': int(med)})
+ localpref = info[info.index('localpref') + 1]
+ attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
+ 'value': int(localpref)})
+
+ rib.append({'prefix': prefix, 'nexthop': nexthop,
+ 'aspath': aspath, 'attrs': attrs})
+
+ return rib
+
+            raise Exception('not found peer {0}'.format(peer.router_id))
+
+ neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
+
+ info = [l.strip() for l in self.vtysh(
+ 'show bgp neighbors {0}'.format(neigh_addr),
+ config=False).split('\n')]
+
+ raise Exception('unknown format')
+
+ idx1 = info[0].index('BGP neighbor is ')
+ idx2 = info[0].index(',')
+ n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
+ idx1 = info[2].index('= ')
+ state = info[2][idx1 + len('= '):]
+ return base.BGP_FSM_IDLE
+ return base.BGP_FSM_ACTIVE
+ return base.BGP_FSM_ESTABLISHED
+ return state
+
+            raise Exception('not found peer {0}'.format(peer.router_id))
+
+ self.vtysh('clear ip bgp * soft', config=False)
+
+ zebra = 'no'
+ self._create_config_bgp()
+ zebra = 'yes'
+ self._create_config_zebra()
+ self._create_config_daemons(zebra)
+
+ c = base.CmdBuffer()
+ c << 'vtysh_enable=yes'
+ c << 'zebra_options=" --daemon -A 127.0.0.1"'
+ c << 'bgpd_options=" --daemon -A 127.0.0.1"'
+ c << 'ospfd_options=" --daemon -A 127.0.0.1"'
+ c << 'ospf6d_options=" --daemon -A ::1"'
+ c << 'ripd_options=" --daemon -A 127.0.0.1"'
+ c << 'ripngd_options=" --daemon -A ::1"'
+ c << 'isisd_options=" --daemon -A 127.0.0.1"'
+ c << 'babeld_options=" --daemon -A 127.0.0.1"'
+ c << 'watchquagga_enable=yes'
+ c << 'watchquagga_options=(--daemon)'
+        with open('{0}/debian.conf'.format(self.config_dir), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'zebra=%s' % zebra
+ c << 'bgpd=yes'
+ c << 'ospfd=no'
+ c << 'ospf6d=no'
+ c << 'ripd=no'
+ c << 'ripngd=no'
+ c << 'isisd=no'
+ c << 'babeld=no'
+        with open('{0}/daemons'.format(self.config_dir), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+
+ c = base.CmdBuffer()
+ c << 'hostname bgpd'
+ c << 'password zebra'
+ c << 'router bgp {0}'.format(self.asn)
+ c << 'bgp router-id {0}'.format(self.router_id)
+ if any(info['graceful_restart'] for info in
+ c << 'bgp graceful-restart'
+
+ version = 4
+                version = netaddr.IPNetwork(info['neigh_addr']).version
+ n_addr = info['neigh_addr'].split('/')[0]
+ c << 'no bgp default ipv4-unicast'
+
+            c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
+                c << 'neighbor {0} route-server-client'.format(n_addr)
+                c << 'neighbor {0} route-map {1} {2}'.format(n_addr,
+                                                             p['name'],
+ typ)
+            c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
+ c << 'neighbor {0} passive'.format(n_addr)
+ c << 'address-family ipv6 unicast'
+ c << 'neighbor {0} activate'.format(n_addr)
+ c << 'exit-address-family'
+
+ c << 'network {0}'.format(route['prefix'])
+ c << 'address-family ipv6 unicast'
+ c << 'network {0}'.format(route['prefix'])
+ c << 'exit-address-family'
+ raise Exception(
{0}'.format(route['rf']))
+
+ c << 'address-family ipv6 unicast'
+ c << 'redistribute connected'
+ c << 'exit-address-family'
+ c << 'redistribute connected'
+
+            c << 'access-list {0} {1} {2}'.format(name, policy['type'],
+ policy['match'])
+ c << 'route-map {0} permit 10'.format(name)
+ c << 'match ip address {0}'.format(name)
+ c << 'set metric {0}'.format(policy['med'])
+
+ c << 'debug bgp as4'
+ c << 'debug bgp fsm'
+ c << 'debug bgp updates'
+ c << 'debug bgp events'
+ c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
+
+        with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'hostname zebra'
+ c << 'password zebra'
+ c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
+ c << 'debug zebra packet'
+ c << 'debug zebra kernel'
+ c << 'debug zebra rib'
+ c << ''
+
+        with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ cmd = [cmd]
+ cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
+ return self.exec_on_ctn(
+ "vtysh -d bgpd -c 'en' -c 'conf t' -c "
+ "'router bgp {0}' {1}".format(self.asn, cmd),
+ capture=True)
+            return self.exec_on_ctn("vtysh -d bgpd {0}".format(cmd),
+ capture=True)
+
+ daemon = []
+ daemon.append('bgpd')
+ daemon.append('zebra')
+ cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
+ self.exec_on_ctn(cmd, capture=True)
+
+
+ def __init__(self, name, config, ctn_image_name,
+ asn = None
+ router_id = None
+ line = line.strip()
+ asn = int(line[len('router bgp'):].strip())
+ router_id = line[len('bgp router-id'):].strip()
+ raise Exception('asn not in quagga config')
+ raise Exception('router-id not in quagga config')
+ self.config = config
+        super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
+                                                    ctn_image_name,
+                                                    zebra)
+
+        with open(os.path.join(self.config_dir, 'bgpd.conf'), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(self.config)
+ f.writelines(self.config)
diff --git a/ryu/lib/docker/ryubgp.py b/ryu/lib/docker/ryubgp.py
new file mode 100644
index 0000000..8fe16f4
--- /dev/null
+++ b/ryu/lib/docker/ryubgp.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+import time
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
+
+ WAIT_FOR_BOOT = 1
+ SHARED_VOLUME = '/etc/ryu'
+
+        super(RyuBGPContainer, self).__init__(name, asn, router_id,
+ ctn_image_name)
+ self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
+        self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
+        self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
+        self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
+
+ c = base.CmdBuffer()
+ c << '[DEFAULT]'
+ c << 'verbose=True'
+ c << 'log_file=/etc/ryu/manager.log'
+ LOG.info("[%s's new config]" % self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'import os'
+ c << ''
+ c << 'BGP = {'
+ c << " 'local_as': %s," % str(self.asn)
+ c << " 'router_id': '%s'," % self.router_id
+ c << " 'neighbors': ["
+ c << " {"
+ n_addr = info['neigh_addr'].split('/')[0]
+ c << " 'address': '%s'," % n_addr
+ c << " 'remote_as': %s," % str(peer.asn)
+ c << " 'enable_ipv4': True,"
+ c << " 'enable_ipv6': True,"
+ c << " 'enable_vpnv4': True,"
+ c << " 'enable_vpnv6': True,"
+ c << ' },'
+ c << ' ],'
+ c << " 'routes': ["
+ c << " {"
+ c << " 'prefix': '%s'," % route['prefix']
+ c << " },"
+ c << " ],"
+ c << "}"
+ log_conf = """LOGGING = {
+
+ # We use python logging package for logging.
+ 'version': 1,
+ 'disable_existing_loggers': False,
+
+ 'formatters': {
+ 'verbose': {
+ 'format': '%(levelname)s %(asctime)s %(module)s ' +
+ '[%(process)d %(thread)d] %(message)s'
+ },
+ 'simple': {
+            'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
+ '%(message)s'
+ },
+ 'stats': {
+ 'format': '%(message)s'
+ },
+ },
+
+ 'handlers': {
+ # Outputs log to console.
+ 'console': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'simple'
+ },
+ 'console_stats': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'stats'
+ },
+ # Rotates log file when its size reaches 10MB.
+ 'log_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'bgpspeaker.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'verbose'
+ },
+ 'stats_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'statistics_bgps.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'stats'
+ },
+ },
+
+ # Fine-grained control of logging per instance.
+ 'loggers': {
+ 'bgpspeaker': {
+ 'handlers': ['console', 'log_file'],
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'stats': {
+ 'handlers': ['stats_file', 'console_stats'],
+ 'level': 'INFO',
+ 'propagate': False,
+ 'formatter': 'stats',
+ },
+ },
+
+ # Root loggers.
+ 'root': {
+ 'handlers': ['console', 'log_file'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ },
+}"""
+ c << log_conf
+        with open(os.path.join(self.config_dir, 'bgp_conf.py'), 'w') as f:
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ self._create_config_ryu()
+ self._create_config_ryu_bgp()
+
+ results = self.exec_on_ctn('ps ax')
+ running = False
+ running = True
+ return running
+
+ return True
+ result = False
+ try_times = 3
+ try_times = 1
+ cmd = "ryu-manager --verbose "
+ cmd += "--config-file %s " % self.SHARED_RYU_CONF
+ cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
+ cmd += "ryu.services.protocols.bgp.application"
+ self.exec_on_ctn(cmd, detach=True)
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ return True
+ result = False
+ try_times = 3
+ try_times = 1
+ cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
+ self.exec_on_ctn(cmd)
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ w_time = super(RyuBGPContainer,
+                       self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
+ return w_time
+
+ self.stop_ryubgp(retry=True)
+ self.start_ryubgp(retry=True)
diff --git a/ryu/tests/integrated/common/__init__.py b/ryu/tests/integrated/common/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/ryu/tests/integrated/common/docker_base.py b/ryu/tests/integrated/common/docker_base.py
deleted file mode 100644
index 1ae2cc2..0000000
--- a/ryu/tests/integrated/common/docker_base.py
+++ /dev/null
@@ -1,801 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/base.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import itertools
-import logging
-import os
-import subprocess
-import time
-
-import netaddr
-import six
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_TEST_PREFIX = ''
-DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
-TEST_PREFIX = DEFAULT_TEST_PREFIX
-TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
-
-BGP_FSM_IDLE = 'BGP_FSM_IDLE'
-BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
-BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
-
-BGP_ATTR_TYPE_ORIGIN = 1
-BGP_ATTR_TYPE_AS_PATH = 2
-BGP_ATTR_TYPE_NEXT_HOP = 3
-BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
-BGP_ATTR_TYPE_LOCAL_PREF = 5
-BGP_ATTR_TYPE_COMMUNITIES = 8
-BGP_ATTR_TYPE_ORIGINATOR_ID = 9
-BGP_ATTR_TYPE_CLUSTER_LIST = 10
-BGP_ATTR_TYPE_MP_REACH_NLRI = 14
-BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
-
-BRIDGE_TYPE_DOCKER = 'docker'
-BRIDGE_TYPE_BRCTL = 'brctl'
-BRIDGE_TYPE_OVS = 'ovs'
-
-
- super(CommandError, self).__init__()
- self.out = out
-
-
- e = RuntimeError()
- r = f()
- time.sleep(s)
- return r
- raise e
-
-
- super(CmdBuffer, self).__init__()
- self.delim = delim
-
- self.append(value)
-
- return self.delim.join(self)
-
-
-
- def __new__(cls, stdout, stderr, command, returncode,
- stdout = stdout or ''
-        obj = super(CommandOut, cls).__new__(cls, stdout, **kwargs)
- obj.stderr = stderr or ''
- obj.command = command
- obj.returncode = returncode
- return obj
-
-
-
- """Execute a command using subprocess.Popen()
- - out: stdout from subprocess.Popen()
- out has some attributes.
- out.returncode: returncode of subprocess.Popen()
- out.stderr: stderr from subprocess.Popen()
- """
- p_stdout = subprocess.PIPE
- p_stderr = subprocess.PIPE
- p_stdout = None
- p_stderr = None
-        pop = subprocess.Popen(cmd, shell=True, executable=executable,
- stdout=p_stdout,
- stderr=p_stderr)
- __stdout, __stderr = pop.communicate()
- _stdout = six.text_type(__stdout, 'utf-8')
- _stderr = six.text_type(__stderr, 'utf-8')
- out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
- return out
-
- out = None
- out = self._execute(cmd, capture=capture)
- LOG.info(out.command)
- return out
- LOG.error("stdout: %s", out)
- LOG.error("stderr: %s", out.stderr)
- break
- time.sleep(interval)
- raise CommandError(out)
-
- cmd = 'sudo %s' % cmd
- return self.execute(cmd, capture=capture,
-                            try_times=try_times, interval=interval)
-
-
- self.baseimage = baseimage
- self.cmd = Command()
-
- out = self.cmd.sudo('sudo docker images')
- images = []
- images.append(line.split()[0])
- return images
-
- return name in self.get_images()
-
- self.cmd.sudo(
-            "docker build -t {0} {1}".format(tagname, dockerfile_dir),
- try_times=3)
-
- return tagname
- self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
-
- def create_quagga(self, tagname='quagga', image=None,
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- pkges = ' '.join([
- 'telnet',
- 'tcpdump',
- 'quagga',
- ])
- use_image = image
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'RUN apt-get update'
-        c << 'RUN apt-get install -qy --no-install-recommends %s' % pkges
- c << 'CMD /usr/lib/quagga/bgpd'
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
-        self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
- self.build(tagname, workdir)
- return tagname
-
- def create_ryu(self, tagname='ryu', image=None,
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- workdir_ctn = '/root/osrg/ryu'
- pkges = ' '.join([
- 'tcpdump',
- 'iproute2',
- ])
- use_image = image
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'ADD ryu %s' % workdir_ctn
- install = ' '.join([
- 'RUN apt-get update',
-            '&& apt-get install -qy --no-install-recommends %s' % pkges,
- '&& cd %s' % workdir_ctn,
-            # Note: Clean previous builds, because "python setup.py install"
-            # might fail if the current directory contains the symlink to
- # Docker host file systems.
- '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
-            '&& pip install -r tools/pip-requires -r tools/optional-requires',
- '&& python setup.py install',
- ])
- c << install
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
-        self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
- self.cmd.execute('cp -r ../ryu %s/' % workdir)
- self.build(tagname, workdir)
- return tagname
-
-
-    def __init__(self, name, subnet='', start_ip=None, end_ip=None,
- with_ip=True, self_ip=False,
- fixed_ip=None, reuse=False,
- """Manage a bridge
- - name: bridge name
-        - subnet: network cidr to be used in this bridge
-        - start_ip: start address of an ip to be used in the subnet
-        - end_ip: end address of an ip to be used in the subnet
-        - with_ip: specify if assign automatically an ip address
-        - self_ip: specify if assign an ip address for the bridge
- - fixed_ip: an ip address to be assigned to the bridge
- - reuse: specify if use an existing bridge
- - br_type: One either in a 'docker', 'brctl' or 'ovs'
- """
- self.cmd = Command()
- self.name = name
- if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
-            raise Exception("argument error br_type: %s" % br_type)
- self.br_type = br_type
- self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
- self.name = '{0}_{1}'.format(TEST_PREFIX, name)
- self.with_ip = with_ip
- self.subnet = netaddr.IPNetwork(subnet)
- self.start_ip = start_ip
-            self.start_ip = netaddr.IPAddress(self.subnet.first)
- self.end_ip = end_ip
- self.end_ip = netaddr.IPAddress(self.subnet.last)
-
- for host in netaddr.IPRange(self.start_ip,
- yield host
- self._ip_generator = _ip_gen()
- # throw away first network address
- self.next_ip_address()
-
- self.self_ip = self_ip
- self.ip_addr = fixed_ip
- self.ip_addr = self.next_ip_address()
-                    gw = "--gateway %s" % self.ip_addr.split('/')[0]
- v6 = ''
- v6 = '--ipv6'
-                    cmd = ("docker network create --driver bridge %s "
-                           "%s --subnet %s %s" % (v6, gw, subnet, self.name))
-                cmd = "ip link add {0} type bridge".format(self.name)
-                cmd = "ovs-vsctl add-br {0}".format(self.name)
-                raise ValueError('Unsupported br_type: %s' % self.br_type)
- self.delete()
- self.execute(cmd, sudo=True, retry=True)
- try_several_times(f)
-            self.execute("ip link set up dev {0}".format(self.name),
- sudo=True, retry=True)
-
- ips = self.check_br_addr(self.name)
- self.execute(
-                    "ip addr del {0} dev {1}".format(ip, self.name),
- sudo=True, retry=True)
- self.execute(
-                "ip addr add {0} dev {1}".format(self.ip_addr, self.name),
- sudo=True, retry=True)
- self.ctns = []
-
-        out = self.execute('docker network ls', sudo=True, retry=True)
- bridges = []
- bridges.append(line.split()[1])
- return bridges
-
- out = self.execute('brctl show', retry=True)
- bridges = []
- bridges.append(line.split()[0])
- return bridges
-
-        out = self.execute('ovs-vsctl list-br', sudo=True, retry=True)
- return out.splitlines()
-
- return self.get_bridges_dc()
- return self.get_bridges_brctl()
- return self.get_bridges_ovs()
-
- return self.name in self.get_bridges()
-
- m = self.cmd.sudo
- m = self.cmd.execute
-            return m(cmd, capture=capture, try_times=3, interval=1)
- return m(cmd, capture=capture)
-
- ips = {}
- cmd = "ip a show dev %s" % br
-                elems = [e.strip() for e in line.strip().split(' ')]
- ips[4] = elems[1]
-                elems = [e.strip() for e in line.strip().split(' ')]
- ips[6] = elems[1]
- return ips
-
- return "{0}/{1}".format(next(self._ip_generator),
- self.subnet.prefixlen)
-
- name = ctn.next_if_name()
- self.ctns.append(ctn)
- ip_address = None
- ipv4 = None
- ipv6 = None
- ip_address = self.next_ip_address()
- ip_address_ip = ip_address.split('/')[0]
- version = 4
- version = 6
- opt_ip = "--ip %s" % ip_address_ip
- ipv4 = ip_address
- opt_ip = "--ip6 %s" % ip_address_ip
- ipv6 = ip_address
- cmd = "docker network connect %s %s %s" % (
- opt_ip, self.name, ctn.docker_name())
- self.execute(cmd, sudo=True)
-        ctn.set_addr_info(bridge=self.name, ipv4=ipv4, ipv6=ipv6,
- ifname=name)
- ip_address = self.next_ip_address()
- version = 4
- version = 6
-            ctn.pipework(self, ip_address, name, version=version)
- ctn.pipework(self, '0/0', name)
- return ip_address
-
- return
- self.execute("docker network rm %s" % self.name,
- sudo=True, retry=True)
- self.execute("ip link set down dev %s" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ip link delete %s type bridge" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ovs-vsctl del-br %s" % self.name,
- sudo=True, retry=True)
-
-
- self.name = name
- self.image = image
- self.shared_volumes = []
- self.ip_addrs = []
- self.ip6_addrs = []
- self.is_running = False
- self.eths = []
- self.id = None
-
- self.cmd = Command()
- self.remove()
-
- return self.name
- return '{0}_{1}'.format(TEST_PREFIX, self.name)
-
- return self.id
- return self.docker_name()
-
- name = 'eth{0}'.format(len(self.eths) + 1)
- self.eths.append(name)
- return name
-
- def set_addr_info(self, bridge, ipv4=None, ipv6=None,
- self.ip_addrs.append((ifname, ipv4, bridge))
- self.ip6_addrs.append((ifname, ipv6, bridge))
-
- addrinfo = {}
- ip_addrs = self.ip_addrs
- ip_addrs = self.ip6_addrs
- return None
- addrinfo[addr[1]] = addr[0]
- return addrinfo
-
- m = self.cmd.sudo
- m = self.cmd.execute
-            return m(cmd, capture=capture, try_times=3, interval=1)
- return m(cmd, capture=capture)
-
-            return self.cmd.sudo(cmd, capture=capture, try_times=3,
-                                 interval=1)
- return self.cmd.sudo(cmd, capture=capture)
-
- name = self.docker_name()
- flag = '-d' if detach else ''
- return self.dcexec('docker exec {0} {1} {2}'.format(
- flag, name, cmd), capture=capture)
-
- cmd = 'docker ps --no-trunc=true'
- cmd += ' --all=true'
- out = self.dcexec(cmd, retry=True)
- containers = []
- containers.append(line.split()[-1])
- return containers
-
-        return self.docker_name() in self.get_containers(allctn=allctn)
-
- c = CmdBuffer(' ')
- c << "docker run --privileged=true"
- c << "-v {0}:{1}".format(sv[0], sv[1])
-        c << "--name {0} --hostname {0} -id {1}".format(self.docker_name(),
-                                                        self.image)
- self.id = self.dcexec(str(c), retry=True)
- self.is_running = True
- self.exec_on_ctn("ip li set up dev lo")
- ipv4 = None
- ipv6 = None
- for line in self.exec_on_ctn("ip a show dev
-            elems = [e.strip() for e in line.strip().split(' ')]
- ipv4 = elems[1]
-            elems = [e.strip() for e in line.strip().split(' ')]
- ipv6 = elems[1]
-        self.set_addr_info(bridge='docker0', ipv4=ipv4, ipv6=ipv6,
- ifname='eth0')
- return 0
-
- return
- ctn_id = self.get_docker_id()
-        out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- LOG.warning('Call run() before pipeworking')
- return
- c = CmdBuffer(' ')
- c << "pipework {0}".format(bridge.name)
-
- c << "-i {0}".format(intf_name)
- intf_name = "eth1"
- ipv4 = None
- ipv6 = None
- ipv4 = ip_addr
- c << '-a 6'
- ipv6 = ip_addr
- c << "{0} {1}".format(self.docker_name(), ip_addr)
-        self.set_addr_info(bridge=bridge.name, ipv4=ipv4, ipv6=ipv6,
- ifname=intf_name)
- self.execute(str(c), sudo=True, retry=True)
-
-        cmd = "docker inspect -f '{{.State.Pid}}' %s" % self.docker_name()
- return int(self.dcexec(cmd))
- return -1
-
- interface = "eth0"
- filename = "{0}/{1}.dump".format(
- self.shared_volumes[0][1], interface)
- self.exec_on_ctn(
- "tcpdump -i {0} -w {1}".format(interface, filename),
- detach=True)
-
-
-
- WAIT_FOR_BOOT = 1
- RETRY_INTERVAL = 5
- DEFAULT_PEER_ARG = {'neigh_addr': '',
- 'passwd': None,
- 'vpn': False,
- 'flowspec': False,
- 'is_rs_client': False,
- 'is_rr_client': False,
- 'cluster_id': None,
- 'policies': None,
- 'passive': False,
- 'local_addr': '',
- 'as2': False,
- 'graceful_restart': None,
- 'local_as': None,
- 'prefix_limit': None}
- default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
- DEFAULT_ROUTE_ARG = {'prefix': None,
- 'rf': 'ipv4',
- 'attr': None,
- 'next-hop': None,
- 'as-path': None,
- 'community': None,
- 'med': None,
- 'local-pref': None,
- 'extended-community': None,
- 'matchs': None,
- 'thens': None}
- default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
-
- self.config_dir = TEST_BASE_DIR
-            self.config_dir = os.path.join(self.config_dir, TEST_PREFIX)
- self.config_dir = os.path.join(self.config_dir, name)
- self.asn = asn
- self.router_id = router_id
- self.peers = {}
- self.routes = {}
- self.policies = {}
- super(BGPContainer, self).__init__(name, ctn_image_name)
- self.execute(
- 'rm -rf {0}'.format(self.config_dir), sudo=True)
- self.execute('mkdir -p {0}'.format(self.config_dir))
- self.execute('chmod 777 {0}'.format(self.config_dir))
-
- return str({'name': self.name, 'asn': self.asn,
- 'router_id': self.router_id})
-
- self.create_config()
- super(BGPContainer, self).run()
- time.sleep(w_time)
- return w_time
-
-    def add_peer(self, peer, bridge='', reload_config=True, v6=False,
- peer_info = peer_info or {}
- self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
- self.peers[peer].update(peer_info)
- peer_keys = sorted(self.peers[peer].keys())
-            raise Exception("argument error peer_info: %s" % peer_info)
-
- neigh_addr = ''
- local_addr = ''
- it = itertools.product(self.ip_addrs, peer.ip_addrs)
-            it = itertools.product(self.ip6_addrs, peer.ip6_addrs)
-
- continue
- neigh_addr = you[1]
- local_addr = me[1]
- addr, mask = local_addr.split('/')
-                local_addr = "{0}%{1}/{2}".format(addr, me[0], mask)
- break
-
-            raise Exception('peer {0} seems not ip reachable'.format(peer))
-
- self.peers[peer]['policies'] = {}
-
- self.peers[peer]['neigh_addr'] = neigh_addr
- self.peers[peer]['local_addr'] = local_addr
- self.create_config()
- self.reload_config()
-
- del self.peers[peer]
- self.create_config()
- self.reload_config()
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
-        return self.execute('cat {0}/*.log'.format(self.config_dir))
-
- def add_route(self, route, reload_config=True,
- route_info = route_info or {}
- self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
- self.routes[route].update(route_info)
- route_keys = sorted(self.routes[route].keys())
-            raise Exception("argument error route_info: %s" % route_info)
- self.routes[route]['prefix'] = route
- self.create_config()
- self.reload_config()
-
- def add_policy(self, policy, peer, typ, default='accept',
- self.set_default_policy(peer, typ, default)
- self.define_policy(policy)
- self.assign_policy(peer, policy, typ)
- self.create_config()
- self.reload_config()
-
- if (typ in ['in', 'out', 'import', 'export'] and
- self.peers[peer]['default-policy'] = {}
- self.peers[peer]['default-policy'][typ] = default
- raise Exception('wrong type or default')
-
- self.policies[policy['name']] = policy
-
-            raise Exception('peer {0} not found'.format(peer.name))
- name = policy['name']
- raise Exception('policy {0} not found'.format(name))
- self.peers[peer]['policies'][typ] = policy
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- version = netaddr.IPNetwork(prefix).version
- addr = prefix.split('/')[0]
- ping_cmd = 'ping'
- ping_cmd = 'ping6'
- raise Exception(
- 'unsupported route family: {0}'.format(version))
-        cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format(
- ping_cmd, addr)
- interval = 1
- count = 0
- res = self.exec_on_ctn(cmd)
- LOG.info(res)
- break
- time.sleep(interval)
- count += interval
- raise Exception('timeout')
- return True
-
- interval = 1
- count = 0
- state = self.get_neighbor_state(peer)
- LOG.info("%s's peer %s state: %s",
- self.router_id, peer.router_id, state)
- return
-
- time.sleep(interval)
- count += interval
- raise Exception('timeout')
-
-        cmd = '/sbin/ip route add {0} via {1}'.format(network, next_hop)
- self.exec_on_ctn(cmd)
-
- cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
- self.exec_on_ctn(cmd)
-
- raise NotImplementedError()
-
- raise NotImplementedError()
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg.sh b/ryu/tests/integrated/common/install_docker_test_pkg.sh
deleted file mode 100644
index a771dfc..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-function add_docker_aptline {
- sudo apt-get update
- if ! apt-cache search docker-engine | grep docker-engine; then
- VER=`lsb_release -r`
- if echo $VER | grep 12.04; then
- REL_NAME=precise
- elif echo $VER | grep 14.04; then
- REL_NAME=trusty
- elif echo $VER | grep 15.10; then
- REL_NAME=wily
- elif echo $VER | grep 16.04; then
- REL_NAME=xenial
- else
-            return 1
- fi
- RELEASE=ubuntu-$REL_NAME
-        sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-        sudo sh -c "echo deb https://apt.dockerproject.org/repo $RELEASE main > /etc/apt/sources.list.d/docker.list"
- fi
-}
-
-init_variables
-
-if [ $APTLINE_DOCKER -eq 1 ]; then
- add_docker_aptline
-fi
-
-sudo apt-get update
-if apt-cache search docker-engine | grep docker-engine; then
- DOCKER_PKG=docker-engine
-else
- DOCKER_PKG=docker.io
-fi
-sudo apt-get install -y $DOCKER_PKG
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh b/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
deleted file mode 100644
index 44a3e10..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-set -ex
-
-function init_variables {
- APTLINE_DOCKER=0
- DIR_BASE=/tmp
-}
-
-function process_options {
- local max
- local i
- max=$#
- i=1
- while [ $i -le $max ]; do
- case "$1" in
- -a|--add-docker-aptline)
- APTLINE_DOCKER=1
- ;;
- -d|--download-dir)
- shift; ((i++))
- DIR_BASE=$1
- ;;
- esac
- shift; ((i++))
- done
-}
-
-function install_pipework {
- if ! which /usr/local/bin/pipework >/dev/null
- then
- sudo rm -rf $DIR_BASE/pipework
-        git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework
-        sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework
- fi
-}
-
-function install_depends_pkg {
- install_pipework
-}
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh b/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
deleted file mode 100644
index d8c3b49..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-init_variables
-
-sudo apt-get update
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/quagga.py b/ryu/tests/integrated/common/quagga.py
deleted file mode 100644
index 9b6d218..0000000
--- a/ryu/tests/integrated/common/quagga.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/quagga.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-
-import netaddr
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/quagga'
-
- def __init__(self, name, asn, router_id, ctn_image_name,
-        super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
-                                                 ctn_image_name)
-        self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
- self.zebra = zebra
- self._create_config_debian()
-
- w_time = super(QuaggaBGPContainer,
-                       self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- rib = []
- return self.get_global_rib_with_prefix(prefix, rf)
-
-        out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
- return rib
-
- read_next = False
-
- ibgp = False
- line = line[2:]
- line = line[1:]
- ibgp = True
- continue
-
- elems = line.split()
-
- read_next = True
- prefix = elems[0]
- continue
- nexthop = elems[0]
- prefix = elems[0]
- nexthop = elems[1]
- read_next = False
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'ibgp': ibgp})
-
- return rib
-
- rib = []
-
- lines = [line.strip() for line in self.vtysh(
- 'show bgp {0} unicast {1}'.format(rf, prefix),
- config=False).split('\n')]
-
- return rib
-
- lines = lines[2:]
-
- lines.pop(0) # another useless line
- elif lines[0].startswith('Advertised to non peer-group
- lines = lines[2:] # other useless lines
-            raise Exception('unknown output format {0}'.format(lines))
-
- aspath = []
- aspath = [int(asn) for asn in lines[0].split()]
-
- nexthop = lines[1].split()[0].strip()
- info = [s.strip(',') for s in lines[2].split()]
- attrs = []
- med = info[info.index('metric') + 1]
-            attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
- 'metric': int(med)})
- localpref = info[info.index('localpref') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
- 'value': int(localpref)})
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'aspath': aspath, 'attrs': attrs})
-
- return rib
-
-            raise Exception('not found peer {0}'.format(peer.router_id))
-
- neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
-
- info = [l.strip() for l in self.vtysh(
- 'show bgp neighbors {0}'.format(neigh_addr),
- config=False).split('\n')]
-
- raise Exception('unknown format')
-
- idx1 = info[0].index('BGP neighbor is ')
- idx2 = info[0].index(',')
- n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
- idx1 = info[2].index('= ')
- state = info[2][idx1 + len('= '):]
- return base.BGP_FSM_IDLE
- return base.BGP_FSM_ACTIVE
- return base.BGP_FSM_ESTABLISHED
- return state
-
-            raise Exception('not found peer {0}'.format(peer.router_id))
-
- self.vtysh('clear ip bgp * soft', config=False)
-
- zebra = 'no'
- self._create_config_bgp()
- zebra = 'yes'
- self._create_config_zebra()
- self._create_config_daemons(zebra)
-
- c = base.CmdBuffer()
- c << 'vtysh_enable=yes'
- c << 'zebra_options=" --daemon -A 127.0.0.1"'
- c << 'bgpd_options=" --daemon -A 127.0.0.1"'
- c << 'ospfd_options=" --daemon -A 127.0.0.1"'
- c << 'ospf6d_options=" --daemon -A ::1"'
- c << 'ripd_options=" --daemon -A 127.0.0.1"'
- c << 'ripngd_options=" --daemon -A ::1"'
- c << 'isisd_options=" --daemon -A 127.0.0.1"'
- c << 'babeld_options=" --daemon -A 127.0.0.1"'
- c << 'watchquagga_enable=yes'
- c << 'watchquagga_options=(--daemon)'
-        with open('{0}/debian.conf'.format(self.config_dir), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'zebra=%s' % zebra
- c << 'bgpd=yes'
- c << 'ospfd=no'
- c << 'ospf6d=no'
- c << 'ripd=no'
- c << 'ripngd=no'
- c << 'isisd=no'
- c << 'babeld=no'
-        with open('{0}/daemons'.format(self.config_dir), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
-
- c = base.CmdBuffer()
- c << 'hostname bgpd'
- c << 'password zebra'
- c << 'router bgp {0}'.format(self.asn)
- c << 'bgp router-id {0}'.format(self.router_id)
- if any(info['graceful_restart'] for info in
- c << 'bgp graceful-restart'
-
- version = 4
-                version = netaddr.IPNetwork(info['neigh_addr']).version
- n_addr = info['neigh_addr'].split('/')[0]
- c << 'no bgp default ipv4-unicast'
-
-            c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
-                c << 'neighbor {0} route-server-client'.format(n_addr)
-                c << 'neighbor {0} route-map {1} {2}'.format(n_addr,
-                                                             p['name'],
- typ)
-            c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
- c << 'neighbor {0} passive'.format(n_addr)
- c << 'address-family ipv6 unicast'
- c << 'neighbor {0} activate'.format(n_addr)
- c << 'exit-address-family'
-
- c << 'network {0}'.format(route['prefix'])
- c << 'address-family ipv6 unicast'
- c << 'network {0}'.format(route['prefix'])
- c << 'exit-address-family'
- raise Exception(
{0}'.format(route['rf']))
-
- c << 'address-family ipv6 unicast'
- c << 'redistribute connected'
- c << 'exit-address-family'
- c << 'redistribute connected'
-
-            c << 'access-list {0} {1} {2}'.format(name, policy['type'],
- policy['match'])
- c << 'route-map {0} permit 10'.format(name)
- c << 'match ip address {0}'.format(name)
- c << 'set metric {0}'.format(policy['med'])
-
- c << 'debug bgp as4'
- c << 'debug bgp fsm'
- c << 'debug bgp updates'
- c << 'debug bgp events'
- c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
-
-        with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'hostname zebra'
- c << 'password zebra'
- c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
- c << 'debug zebra packet'
- c << 'debug zebra kernel'
- c << 'debug zebra rib'
- c << ''
-
-        with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- cmd = [cmd]
- cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
- return self.exec_on_ctn(
- "vtysh -d bgpd -c 'en' -c 'conf t' -c "
- "'router bgp {0}' {1}".format(self.asn, cmd),
- capture=True)
-            return self.exec_on_ctn("vtysh -d bgpd {0}".format(cmd),
- capture=True)
-
- daemon = []
- daemon.append('bgpd')
- daemon.append('zebra')
- cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
- self.exec_on_ctn(cmd, capture=True)
-
-
- def __init__(self, name, config, ctn_image_name,
- asn = None
- router_id = None
- line = line.strip()
- asn = int(line[len('router bgp'):].strip())
- router_id = line[len('bgp router-id'):].strip()
- raise Exception('asn not in quagga config')
- raise Exception('router-id not in quagga config')
- self.config = config
-        super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
-                                                    ctn_image_name,
-                                                    zebra)
-
-        with open(os.path.join(self.config_dir, 'bgpd.conf'), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(self.config)
- f.writelines(self.config)
diff --git a/ryu/tests/integrated/common/ryubgp.py b/ryu/tests/integrated/common/ryubgp.py
deleted file mode 100644
index 8fe16f4..0000000
--- a/ryu/tests/integrated/common/ryubgp.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-import time
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/ryu'
-
-        super(RyuBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
-        self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
-        self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
-        self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
-
- c = base.CmdBuffer()
- c << '[DEFAULT]'
- c << 'verbose=True'
- c << 'log_file=/etc/ryu/manager.log'
- LOG.info("[%s's new config]" % self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'import os'
- c << ''
- c << 'BGP = {'
- c << " 'local_as': %s," % str(self.asn)
- c << " 'router_id': '%s'," % self.router_id
- c << " 'neighbors': ["
- c << " {"
- n_addr = info['neigh_addr'].split('/')[0]
- c << " 'address': '%s'," % n_addr
- c << " 'remote_as': %s," % str(peer.asn)
- c << " 'enable_ipv4': True,"
- c << " 'enable_ipv6': True,"
- c << " 'enable_vpnv4': True,"
- c << " 'enable_vpnv6': True,"
- c << ' },'
- c << ' ],'
- c << " 'routes': ["
- c << " {"
- c << " 'prefix': '%s'," % route['prefix']
- c << " },"
- c << " ],"
- c << "}"
- log_conf = """LOGGING = {
-
- # We use python logging package for logging.
- 'version': 1,
- 'disable_existing_loggers': False,
-
- 'formatters': {
- 'verbose': {
- 'format': '%(levelname)s %(asctime)s %(module)s ' +
- '[%(process)d %(thread)d] %(message)s'
- },
- 'simple': {
-            'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
- '%(message)s'
- },
- 'stats': {
- 'format': '%(message)s'
- },
- },
-
- 'handlers': {
- # Outputs log to console.
- 'console': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'simple'
- },
- 'console_stats': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'stats'
- },
- # Rotates log file when its size reaches 10MB.
- 'log_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'bgpspeaker.log'),
- 'maxBytes': '10000000',
- 'formatter': 'verbose'
- },
- 'stats_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'statistics_bgps.log'),
- 'maxBytes': '10000000',
- 'formatter': 'stats'
- },
- },
-
- # Fine-grained control of logging per instance.
- 'loggers': {
- 'bgpspeaker': {
- 'handlers': ['console', 'log_file'],
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'stats': {
- 'handlers': ['stats_file', 'console_stats'],
- 'level': 'INFO',
- 'propagate': False,
- 'formatter': 'stats',
- },
- },
-
- # Root loggers.
- 'root': {
- 'handlers': ['console', 'log_file'],
- 'level': 'DEBUG',
- 'propagate': True,
- },
-}"""
- c << log_conf
-        with open(os.path.join(self.config_dir, 'bgp_conf.py'), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- self._create_config_ryu()
- self._create_config_ryu_bgp()
-
- results = self.exec_on_ctn('ps ax')
- running = False
- running = True
- return running
-
- return True
- result = False
- try_times = 3
- try_times = 1
- cmd = "ryu-manager --verbose "
- cmd += "--config-file %s " % self.SHARED_RYU_CONF
- cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
- cmd += "ryu.services.protocols.bgp.application"
- self.exec_on_ctn(cmd, detach=True)
- result = True
- break
- time.sleep(1)
- return result
-
- return True
- result = False
- try_times = 3
- try_times = 1
- cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
- self.exec_on_ctn(cmd)
- result = True
- break
- time.sleep(1)
- return result
-
- w_time = super(RyuBGPContainer,
-                       self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- self.stop_ryubgp(retry=True)
- self.start_ryubgp(retry=True)
diff --git a/tests/integrated/bgp/base.py b/tests/integrated/bgp/base.py
index 26fa396..2f210de 100644
--- a/tests/integrated/bgp/base.py
+++ b/tests/integrated/bgp/base.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest
-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga
LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/base_ip6.py b/tests/integrated/bgp/base_ip6.py
index be26faf..d867920 100644
--- a/tests/integrated/bgp/base_ip6.py
+++ b/tests/integrated/bgp/base_ip6.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest
-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga
LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/test_basic.py b/tests/integrated/bgp/test_basic.py
index 5817d44..d1eda39 100644
--- a/tests/integrated/bgp/test_basic.py
+++ b/tests/integrated/bgp/test_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import time
-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base
from . import base
diff --git a/tests/integrated/bgp/test_ip6_basic.py b/tests/integrated/bgp/test_ip6_basic.py
index 40461a5..911a0b5 100644
--- a/tests/integrated/bgp/test_ip6_basic.py
+++ b/tests/integrated/bgp/test_ip6_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import time
-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base
from . import base_ip6 as base
--
2.7.4
fumihiko kakuma
2017-07-13 04:58:40 UTC
Permalink
On Thu, 13 Jul 2017 13:15:46 +0900
Post by Takashi YAMAMOTO
Post by FUJITA Tomonori
On Thu, 13 Jul 2017 09:15:25 +0900
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Hi Yamamoto-San,
On Wed, 12 Jul 2017 23:43:25 +0900
Post by Takashi YAMAMOTO
this breaks openstack neutron_dynamic_routing, doesn't it?
Thanks for the advice. I know that.
So as soon as the ryu version with this commit is released,
I am going to push a patch to fix the breakage in neutron-dynamic-routing.
is it difficult to avoid breaking it?
e.g. make it try-import the new and old locations.
This change only breaks the scenario tests in neutron-dynamic-routing,
and currently they are a non-voting job.
Don't you like it?
I don't like any breaking changes.
Import errors in tempest plugins are often a disaster.
Hmm, it seems not to affect other projects.
Anyway, I think this change will not be merged, as Fujita-san also
doesn't like any breaking changes.
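For reference, a minimal sketch of the try-import fallback suggested above. It assumes the test modules simply try the new ryu.lib.docker location first and fall back to the old ryu.tests.integrated.common one; the shim is only an illustration and is not part of the posted patch:

    # Illustrative compatibility shim, not part of the posted patch.
    # Try the new module location first, then fall back to the old one,
    # so releases that predate the move keep working unchanged.
    try:
        from ryu.lib.docker import docker_base as ctn_base
        from ryu.lib.docker import quagga
        from ryu.lib.docker import ryubgp
    except ImportError:
        from ryu.tests.integrated.common import docker_base as ctn_base
        from ryu.tests.integrated.common import quagga
        from ryu.tests.integrated.common import ryubgp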
Post by Takashi YAMAMOTO
Post by FUJITA Tomonori
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Thanks,
Kakuma
Post by Takashi YAMAMOTO
On Mon, Jul 10, 2017 at 12:51 PM, fumihiko kakuma <
Post by fumihiko kakuma
This patch requires the v2 patch of Iwase-San.
---
.travis.yml | 2 +-
ryu/lib/docker/__init__.py | 0
ryu/lib/docker/docker_base.py | 801
+++++++++++++++++++++
ryu/lib/docker/install_docker_test_pkg.sh | 43 ++
ryu/lib/docker/install_docker_test_pkg_common.sh | 39 +
.../docker/install_docker_test_pkg_for_travis.sh | 12 +
ryu/lib/docker/quagga.py | 332 +++++++++
ryu/lib/docker/ryubgp.py | 212 ++++++
ryu/tests/integrated/common/__init__.py | 0
ryu/tests/integrated/common/docker_base.py | 801
---------------------
.../integrated/common/install_docker_test_pkg.sh | 43 --
.../common/install_docker_test_pkg_common.sh | 39 -
.../common/install_docker_test_pkg_for_travis.sh | 12 -
ryu/tests/integrated/common/quagga.py | 332
---------
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
ryu/tests/integrated/common/ryubgp.py | 212 ------
tests/integrated/bgp/base.py | 6 +-
tests/integrated/bgp/base_ip6.py | 6 +-
tests/integrated/bgp/test_basic.py | 2 +-
tests/integrated/bgp/test_ip6_basic.py | 2 +-
19 files changed, 1448 insertions(+), 1448 deletions(-)
create mode 100644 ryu/lib/docker/__init__.py
create mode 100644 ryu/lib/docker/docker_base.py
create mode 100644 ryu/lib/docker/install_docker_test_pkg.sh
create mode 100644 ryu/lib/docker/install_docker_
test_pkg_common.sh
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
create mode 100644 ryu/lib/docker/install_docker_
test_pkg_for_travis.sh
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
create mode 100644 ryu/lib/docker/quagga.py
create mode 100644 ryu/lib/docker/ryubgp.py
delete mode 100644 ryu/tests/integrated/common/__init__.py
delete mode 100644 ryu/tests/integrated/common/docker_base.py
delete mode 100644 ryu/tests/integrated/common/
install_docker_test_pkg.sh
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
delete mode 100644 ryu/tests/integrated/common/
install_docker_test_pkg_
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
common.sh
delete mode 100644 ryu/tests/integrated/common/
install_docker_test_pkg_for_travis.sh
delete mode 100644 ryu/tests/integrated/common/quagga.py
delete mode 100644 ryu/tests/integrated/common/ryubgp.py
diff --git a/.travis.yml b/.travis.yml
index 9e5474a..cd35aac 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,7 +16,7 @@ sudo: required # Required to enable Docker
service
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- pip install tox coveralls
- - bash ryu/tests/integrated/common/install_docker_test_pkg_for_
travis.sh
+ - bash ryu/lib/docker/install_docker_test_pkg_for_travis.sh
- NOSE_VERBOSE=0 tox -e $TOX_ENV
diff --git a/ryu/lib/docker/__init__.py
b/ryu/lib/docker/__init__.py
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
new file mode 100644
index 0000000..e69de29
diff --git a/ryu/lib/docker/docker_base.py
b/ryu/lib/docker/docker_base.py
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
new file mode 100644
index 0000000..1ae2cc2
--- /dev/null
+++ b/ryu/lib/docker/docker_base.py
@@ -0,0 +1,801 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/base.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
software
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+# distributed under the License is distributed on an "AS IS"
BASIS,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions
and
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import itertools
+import logging
+import os
+import subprocess
+import time
+
+import netaddr
+import six
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_TEST_PREFIX = ''
+DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
+TEST_PREFIX = DEFAULT_TEST_PREFIX
+TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
+
+BGP_FSM_IDLE = 'BGP_FSM_IDLE'
+BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
+BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
+
+BGP_ATTR_TYPE_ORIGIN = 1
+BGP_ATTR_TYPE_AS_PATH = 2
+BGP_ATTR_TYPE_NEXT_HOP = 3
+BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
+BGP_ATTR_TYPE_LOCAL_PREF = 5
+BGP_ATTR_TYPE_COMMUNITIES = 8
+BGP_ATTR_TYPE_ORIGINATOR_ID = 9
+BGP_ATTR_TYPE_CLUSTER_LIST = 10
+BGP_ATTR_TYPE_MP_REACH_NLRI = 14
+BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
+
+BRIDGE_TYPE_DOCKER = 'docker'
+BRIDGE_TYPE_BRCTL = 'brctl'
+BRIDGE_TYPE_OVS = 'ovs'
+
+
+ super(CommandError, self).__init__()
+ self.out = out
+
+
+ e = RuntimeError()
+ r = f()
+ time.sleep(s)
+ return r
+ raise e
+
+
+ super(CmdBuffer, self).__init__()
+ self.delim = delim
+
+ self.append(value)
+
+ return self.delim.join(self)
+
+
+
+ def __new__(cls, stdout, stderr, command, returncode,
+ stdout = stdout or ''
+ obj = super(CommandOut, cls).__new__(cls, stdout,
**kwargs)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ obj.stderr = stderr or ''
+ obj.command = command
+ obj.returncode = returncode
+ return obj
+
+
+
+ """Execute a command using subprocess.Popen()
+ - out: stdout from subprocess.Popen()
+ out has some attributes.
+ out.returncode: returncode of subprocess.Popen()
+ out.stderr: stderr from subprocess.Popen()
+ """
+ p_stdout = subprocess.PIPE
+ p_stderr = subprocess.PIPE
+ p_stdout = None
+ p_stderr = None
+ pop = subprocess.Popen(cmd, shell=True,
executable=executable,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ stdout=p_stdout,
+ stderr=p_stderr)
+ __stdout, __stderr = pop.communicate()
+ _stdout = six.text_type(__stdout, 'utf-8')
+ _stderr = six.text_type(__stderr, 'utf-8')
+ out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
+ return out
+
+ out = None
+ out = self._execute(cmd, capture=capture)
+ LOG.info(out.command)
+ return out
+ LOG.error("stdout: %s", out)
+ LOG.error("stderr: %s", out.stderr)
+ break
+ time.sleep(interval)
+ raise CommandError(out)
+
+ cmd = 'sudo %s' % cmd
+ return self.execute(cmd, capture=capture,
+ try_times=try_times,
interval=interval)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+
+ self.baseimage = baseimage
+ self.cmd = Command()
+
+ out = self.cmd.sudo('sudo docker images')
+ images = []
+ images.append(line.split()[0])
+ return images
+
+ return name in self.get_images()
+
+ self.cmd.sudo(
+ "docker build -t {0} {1}".format(tagname,
dockerfile_dir),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ try_times=3)
+
+ return tagname
+ self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
+
+ def create_quagga(self, tagname='quagga', image=None,
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ pkges = ' '.join([
+ 'telnet',
+ 'tcpdump',
+ 'quagga',
+ ])
+ use_image = image
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'RUN apt-get update'
+ c << 'RUN apt-get install -qy --no-install-recommends %s'
%
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
pkges
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ c << 'CMD /usr/lib/quagga/bgpd'
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c),
workdir))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.build(tagname, workdir)
+ return tagname
+
+ def create_ryu(self, tagname='ryu', image=None,
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ workdir_ctn = '/root/osrg/ryu'
+ pkges = ' '.join([
+ 'tcpdump',
+ 'iproute2',
+ ])
+ use_image = image
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'ADD ryu %s' % workdir_ctn
+ install = ' '.join([
+ 'RUN apt-get update',
+ '&& apt-get install -qy --no-install-recommends %s' %
pkges,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ '&& cd %s' % workdir_ctn,
+ # Note: Clean previous builds, because "python
setup.py
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
install"
+ # might fail if the current directory contains the
symlink to
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ # Docker host file systems.
+ '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
+ '&& pip install -r tools/pip-requires -r
tools/optional-requires',
+ '&& python setup.py install',
+ ])
+ c << install
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c),
workdir))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.cmd.execute('cp -r ../ryu %s/' % workdir)
+ self.build(tagname, workdir)
+ return tagname
+
+
+ def __init__(self, name, subnet='', start_ip=None,
end_ip=None,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ with_ip=True, self_ip=False,
+ fixed_ip=None, reuse=False,
+ """Manage a bridge
+ - name: bridge name
+ - subnet: network cider to be used in this bridge
+ - start_ip: start address of an ip to be used in the
subnet
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ - end_ip: end address of an ip to be used in the
subnet
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ - with_ip: specify if assign automatically an ip
address
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ - self_ip: specify if assign an ip address for the
bridge
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ - fixed_ip: an ip address to be assigned to the bridge
+ - reuse: specify if use an existing bridge
+ - br_type: One either in a 'docker', 'brctl' or 'ovs'
+ """
+ self.cmd = Command()
+ self.name = name
+ if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
+ raise Exception("argument error br_type: %s" %
br_type)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.br_type = br_type
+ self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
+ self.name = '{0}_{1}'.format(TEST_PREFIX, name)
+ self.with_ip = with_ip
+ self.subnet = netaddr.IPNetwork(subnet)
+ self.start_ip = start_ip
+ self.start_ip = netaddr.IPAddress(self.subnet.
first)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.end_ip = end_ip
+ self.end_ip = netaddr.IPAddress(self.subnet.last)
+
+ for host in netaddr.IPRange(self.start_ip,
+ yield host
+ self._ip_generator = _ip_gen()
+ # throw away first network address
+ self.next_ip_address()
+
+ self.self_ip = self_ip
+ self.ip_addr = fixed_ip
+ self.ip_addr = self.next_ip_address()
+ gw = "--gateway %s" %
self.ip_addr.split('/')[0]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ v6 = ''
+ v6 = '--ipv6'
+ cmd = ("docker network create --driver bridge
%s "
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ "%s --subnet %s %s" % (v6, gw, subnet,
self.name))
+ cmd = "ip link add {0} type bridge".format(
self.name)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ cmd = "ovs-vsctl add-br {0}".format(self.name
)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ raise ValueError('Unsupported br_type: %s' %
self.br_type)
+ self.delete()
+ self.execute(cmd, sudo=True, retry=True)
+ try_several_times(f)
+ self.execute("ip link set up dev {0}".format(
self.name),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ sudo=True, retry=True)
+
+ ips = self.check_br_addr(self.name)
+ self.execute(
+ "ip addr del {0} dev {1}".format(ip,
self.name),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ sudo=True, retry=True)
+ self.execute(
+ "ip addr add {0} dev {1}".format(self.ip_addr,
self.name
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
),
+ sudo=True, retry=True)
+ self.ctns = []
+
+ out = self.execute('docker network ls', sudo=True,
retry=True)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ bridges = []
+ bridges.append(line.split()[1])
+ return bridges
+
+ out = self.execute('brctl show', retry=True)
+ bridges = []
+ bridges.append(line.split()[0])
+ return bridges
+
+ out = self.execute('ovs-vsctl list-br', sudo=True,
retry=True)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ return out.splitlines()
+
+ return self.get_bridges_dc()
+ return self.get_bridges_brctl()
+ return self.get_bridges_ovs()
+
+ return self.name in self.get_bridges()
+
+ m = self.cmd.sudo
+ m = self.cmd.execute
+ return m(cmd, capture=capture, try_times=3,
interval=1)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ return m(cmd, capture=capture)
+
+ ips = {}
+ cmd = "ip a show dev %s" % br
+ elems = [e.strip() for e in line.strip().split('
')]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ips[4] = elems[1]
+ elems = [e.strip() for e in line.strip().split('
')]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ips[6] = elems[1]
+ return ips
+
+ return "{0}/{1}".format(next(self._ip_generator),
+ self.subnet.prefixlen)
+
+ name = ctn.next_if_name()
+ self.ctns.append(ctn)
+ ip_address = None
+ ipv4 = None
+ ipv6 = None
+ ip_address = self.next_ip_address()
+ ip_address_ip = ip_address.split('/')[0]
+ version = 4
+ version = 6
+ opt_ip = "--ip %s" % ip_address_ip
+ ipv4 = ip_address
+ opt_ip = "--ip6 %s" % ip_address_ip
+ ipv6 = ip_address
+ cmd = "docker network connect %s %s %s" % (
+ opt_ip, self.name, ctn.docker_name())
+ self.execute(cmd, sudo=True)
+ ctn.set_addr_info(bridge=self.name, ipv4=ipv4,
ipv6=ipv6,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ifname=name)
+ ip_address = self.next_ip_address()
+ version = 4
+ version = 6
+ ctn.pipework(self, ip_address, name,
version=version)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ctn.pipework(self, '0/0', name)
+ return ip_address
+
+ return
+ self.execute("docker network rm %s" % self.name,
+ sudo=True, retry=True)
+ self.execute("ip link set down dev %s" % self.name,
+ sudo=True, retry=True)
+ self.execute(
+ "ip link delete %s type bridge" % self.name,
+ sudo=True, retry=True)
+ self.execute(
+ "ovs-vsctl del-br %s" % self.name,
+ sudo=True, retry=True)
+
+
+ self.name = name
+ self.image = image
+ self.shared_volumes = []
+ self.ip_addrs = []
+ self.ip6_addrs = []
+ self.is_running = False
+ self.eths = []
+ self.id = None
+
+ self.cmd = Command()
+ self.remove()
+
+ return self.name
+ return '{0}_{1}'.format(TEST_PREFIX, self.name)
+
+ return self.id
+ return self.docker_name()
+
+ name = 'eth{0}'.format(len(self.eths) + 1)
+ self.eths.append(name)
+ return name
+
+ def set_addr_info(self, bridge, ipv4=None, ipv6=None,
+ self.ip_addrs.append((ifname, ipv4, bridge))
+ self.ip6_addrs.append((ifname, ipv6, bridge))
+
+ addrinfo = {}
+ ip_addrs = self.ip_addrs
+ ip_addrs = self.ip6_addrs
+ return None
+ addrinfo[addr[1]] = addr[0]
+ return addrinfo
+
+ m = self.cmd.sudo
+ m = self.cmd.execute
+ return m(cmd, capture=capture, try_times=3,
interval=1)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ return m(cmd, capture=capture)
+
+ return self.cmd.sudo(cmd, capture=capture,
try_times=3,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
interval=1)
+ return self.cmd.sudo(cmd, capture=capture)
+
+ name = self.docker_name()
+ flag = '-d' if detach else ''
+ return self.dcexec('docker exec {0} {1} {2}'.format(
+ flag, name, cmd), capture=capture)
+
+ cmd = 'docker ps --no-trunc=true'
+ cmd += ' --all=true'
+ out = self.dcexec(cmd, retry=True)
+ containers = []
+ containers.append(line.split()[-1])
+ return containers
+
+ return self.docker_name() in self.get_containers(allctn=
allctn)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ c = CmdBuffer(' ')
+ c << "docker run --privileged=true"
+ c << "-v {0}:{1}".format(sv[0], sv[1])
+ c << "--name {0} --hostname {0} -id
{1}".format(self.docker_name()
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
,
+
self.image)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.id = self.dcexec(str(c), retry=True)
+ self.is_running = True
+ self.exec_on_ctn("ip li set up dev lo")
+ ipv4 = None
+ ipv6 = None
+ for line in self.exec_on_ctn("ip a show dev
+ elems = [e.strip() for e in line.strip().split('
')]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ipv4 = elems[1]
+ elems = [e.strip() for e in line.strip().split('
')]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ipv6 = elems[1]
+ self.set_addr_info(bridge='docker0', ipv4=ipv4,
ipv6=ipv6,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ifname='eth0')
+ return 0
+
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker stop -t 0 %s' % ctn_id,
retry=True)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.is_running = False
+ return out
+
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
+ self.is_running = False
+ return out
+
+ LOG.warning('Call run() before pipeworking')
+ return
+ c = CmdBuffer(' ')
+ c << "pipework {0}".format(bridge.name)
+
+ c << "-i {0}".format(intf_name)
+ intf_name = "eth1"
+ ipv4 = None
+ ipv6 = None
+ ipv4 = ip_addr
+ c << '-a 6'
+ ipv6 = ip_addr
+ c << "{0} {1}".format(self.docker_name(), ip_addr)
+ self.set_addr_info(bridge=bridge.name, ipv4=ipv4,
ipv6=ipv6,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ifname=intf_name)
+ self.execute(str(c), sudo=True, retry=True)
+
+ cmd = "docker inspect -f '{{.State.Pid}}' %s" %
self.docker_name()
+ return int(self.dcexec(cmd))
+ return -1
+
+ interface = "eth0"
+ filename = "{0}/{1}.dump".format(
+ self.shared_volumes[0][1], interface)
+ self.exec_on_ctn(
+ "tcpdump -i {0} -w {1}".format(interface, filename),
+ detach=True)
+
+
+
+ WAIT_FOR_BOOT = 1
+ RETRY_INTERVAL = 5
+ DEFAULT_PEER_ARG = {'neigh_addr': '',
+ 'passwd': None,
+ 'vpn': False,
+ 'flowspec': False,
+ 'is_rs_client': False,
+ 'is_rr_client': False,
+ 'cluster_id': None,
+ 'policies': None,
+ 'passive': False,
+ 'local_addr': '',
+ 'as2': False,
+ 'graceful_restart': None,
+ 'local_as': None,
+ 'prefix_limit': None}
+ default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
+ DEFAULT_ROUTE_ARG = {'prefix': None,
+ 'rf': 'ipv4',
+ 'attr': None,
+ 'next-hop': None,
+ 'as-path': None,
+ 'community': None,
+ 'med': None,
+ 'local-pref': None,
+ 'extended-community': None,
+ 'matchs': None,
+ 'thens': None}
+ default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
+
+ self.config_dir = TEST_BASE_DIR
+ self.config_dir = os.path.join(self.config_dir,
TEST_PREFIX)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.config_dir = os.path.join(self.config_dir, name)
+ self.asn = asn
+ self.router_id = router_id
+ self.peers = {}
+ self.routes = {}
+ self.policies = {}
+ super(BGPContainer, self).__init__(name, ctn_image_name)
+ self.execute(
+ 'rm -rf {0}'.format(self.config_dir), sudo=True)
+ self.execute('mkdir -p {0}'.format(self.config_dir))
+ self.execute('chmod 777 {0}'.format(self.config_dir))
+
+ return str({'name': self.name, 'asn': self.asn,
+ 'router_id': self.router_id})
+
+ self.create_config()
+ super(BGPContainer, self).run()
+ time.sleep(w_time)
+ return w_time
+
+ def add_peer(self, peer, bridge='', reload_config=True,
v6=False,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ peer_info = peer_info or {}
+ self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
+ self.peers[peer].update(peer_info)
+ peer_keys = sorted(self.peers[peer].keys())
+ raise Exception("argument error peer_info: %s" %
peer_info)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ neigh_addr = ''
+ local_addr = ''
+ it = itertools.product(self.ip_addrs, peer.ip_addrs)
+ it = itertools.product(self.ip6_addrs,
peer.ip6_addrs)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ continue
+ neigh_addr = you[1]
+ local_addr = me[1]
+ addr, mask = local_addr.split('/')
+ local_addr = "{0}%{1}/{2}".format(addr, me[0],
mask)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ break
+
+ raise Exception('peer {0} seems not ip
reachable'.format(peer))
+
+ self.peers[peer]['policies'] = {}
+
+ self.peers[peer]['neigh_addr'] = neigh_addr
+ self.peers[peer]['local_addr'] = local_addr
+ self.create_config()
+ self.reload_config()
+
+ del self.peers[peer]
+ self.create_config()
+ self.reload_config()
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ return self.execute('cat {0}/*.log'.format(self.config_
dir))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ def add_route(self, route, reload_config=True,
+ route_info = route_info or {}
+ self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
+ self.routes[route].update(route_info)
+ route_keys = sorted(self.routes[route].keys())
+ raise Exception("argument error route_info: %s" %
route_info)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.routes[route]['prefix'] = route
+ self.create_config()
+ self.reload_config()
+
+ def add_policy(self, policy, peer, typ, default='accept',
+ self.set_default_policy(peer, typ, default)
+ self.define_policy(policy)
+ self.assign_policy(peer, policy, typ)
+ self.create_config()
+ self.reload_config()
+
+ if (typ in ['in', 'out', 'import', 'export'] and
+ self.peers[peer]['default-policy'] = {}
+ self.peers[peer]['default-policy'][typ] = default
+ raise Exception('wrong type or default')
+
+ self.policies[policy['name']] = policy
+
+ raise Exception('peer {0} not found'.format(peer.name
))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ name = policy['name']
+ raise Exception('policy {0} not found'.format(name))
+ self.peers[peer]['policies'][typ] = policy
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
+
+ version = netaddr.IPNetwork(prefix).version
+ addr = prefix.split('/')[0]
+ ping_cmd = 'ping'
+ ping_cmd = 'ping6'
+ raise Exception(
+ 'unsupported route family: {0}'.format(version))
+ cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs
echo"'.format(
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ping_cmd, addr)
+ interval = 1
+ count = 0
+ res = self.exec_on_ctn(cmd)
+ LOG.info(res)
+ break
+ time.sleep(interval)
+ count += interval
+ raise Exception('timeout')
+ return True
+
+ interval = 1
+ count = 0
+ state = self.get_neighbor_state(peer)
+ LOG.info("%s's peer %s state: %s",
+ self.router_id, peer.router_id, state)
+ return
+
+ time.sleep(interval)
+ count += interval
+ raise Exception('timeout')
+
+ cmd = '/sbin/ip route add {0} via {1}'.format(network,
next_hop)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.exec_on_ctn(cmd)
+
+ cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
+ self.exec_on_ctn(cmd)
+
+ raise NotImplementedError()
+
+ raise NotImplementedError()
diff --git a/ryu/lib/docker/install_docker_test_pkg.sh
b/ryu/lib/docker/install_docker_test_pkg.sh
new file mode 100644
index 0000000..a771dfc
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+function add_docker_aptline {
+ sudo apt-get update
+ if ! apt-cache search docker-engine | grep docker-engine; then
+ VER=`lsb_release -r`
+ if echo $VER | grep 12.04; then
+ REL_NAME=precise
+ elif echo $VER | grep 14.04; then
+ REL_NAME=trusty
+ elif echo $VER | grep 15.10; then
+ REL_NAME=wily
+ elif echo $VER | grep 16.04; then
+ REL_NAME=xenial
+ else
+ retrun 1
+ fi
+ RELEASE=ubuntu-$REL_NAME
+ sudo apt-key adv --keyserver
hkp://p80.pool.sks-keyservers.
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
net:80
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
--recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+ sudo sh -c "echo deb https://apt.dockerproject.org/repo
$RELEASE
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
main > /etc/apt/sources.list.d/docker.list"
+ fi
+}
+
+init_variables
+
+if [ $APTLINE_DOCKER -eq 1 ]; then
+ add_docker_aptline
+fi
+
+sudo apt-get update
+if apt-cache search docker-engine | grep docker-engine; then
+ DOCKER_PKG=docker-engine
+else
+ DOCKER_PKG=docker.io
+fi
+sudo apt-get install -y $DOCKER_PKG
+install_depends_pkg
diff --git a/ryu/lib/docker/install_docker_test_pkg_common.sh
b/ryu/lib/docker/install_docker_test_pkg_common.sh
new file mode 100644
index 0000000..44a3e10
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg_common.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+set -ex
+
+function init_variables {
+ APTLINE_DOCKER=0
+ DIR_BASE=/tmp
+}
+
+function process_options {
+ local max
+ local i
+ max=$#
+ i=1
+ while [ $i -le $max ]; do
+ case "$1" in
+ -a|--add-docker-aptline)
+ APTLINE_DOCKER=1
+ ;;
+ -d|--download-dir)
+ shift; ((i++))
+ DIR_BASE=$1
+ ;;
+ esac
+ shift; ((i++))
+ done
+}
+
+function install_pipework {
+ if ! which /usr/local/bin/pipework >/dev/null
+ then
+ sudo rm -rf $DIR_BASE/pipework
+ git clone https://github.com/jpetazzo/pipework.git
$DIR_BASE/pipework
+ sudo install -m 0755 $DIR_BASE/pipework/pipework
/usr/local/bin/pipework
+ fi
+}
+
+function install_depends_pkg {
+ install_pipework
+}
diff --git a/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
b/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
new file mode 100644
index 0000000..d8c3b49
--- /dev/null
+++ b/ryu/lib/docker/install_docker_test_pkg_for_travis.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+init_variables
+
+sudo apt-get update
+install_depends_pkg
diff --git a/ryu/lib/docker/quagga.py b/ryu/lib/docker/quagga.py
new file mode 100644
index 0000000..9b6d218
--- /dev/null
+++ b/ryu/lib/docker/quagga.py
@@ -0,0 +1,332 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/quagga.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
software
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+# distributed under the License is distributed on an "AS IS"
BASIS,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions
and
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+
+import netaddr
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
+
+ WAIT_FOR_BOOT = 1
+ SHARED_VOLUME = '/etc/quagga'
+
+ def __init__(self, name, asn, router_id, ctn_image_name,
+ super(QuaggaBGPContainer, self).__init__(name, asn,
router_id,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ctn_image_name)
+ self.shared_volumes.append((self.config_dir,
self.SHARED_VOLUME))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ self.zebra = zebra
+ self._create_config_debian()
+
+ w_time = super(QuaggaBGPContainer,
+ self).run(wait=wait,
w_time=self.WAIT_FOR_BOOT)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ return w_time
+
+ rib = []
+ return self.get_global_rib_with_prefix(prefix, rf)
+
+ out = self.vtysh('show bgp {0} unicast'.format(rf),
config=False)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ return rib
+
+ read_next = False
+
+ ibgp = False
+ line = line[2:]
+ line = line[1:]
+ ibgp = True
+ continue
+
+ elems = line.split()
+
+ read_next = True
+ prefix = elems[0]
+ continue
+ nexthop = elems[0]
+ prefix = elems[0]
+ nexthop = elems[1]
+ read_next = False
+
+ rib.append({'prefix': prefix, 'nexthop': nexthop,
+ 'ibgp': ibgp})
+
+ return rib
+
+ rib = []
+
+ lines = [line.strip() for line in self.vtysh(
+ 'show bgp {0} unicast {1}'.format(rf, prefix),
+ config=False).split('\n')]
+
+ return rib
+
+ lines = lines[2:]
+
+ lines.pop(0) # another useless line
+ elif lines[0].startswith('Advertised to non peer-group
+ lines = lines[2:] # other useless lines
+ raise Exception('unknown output format
{0}'.format(lines))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ aspath = []
+ aspath = [int(asn) for asn in lines[0].split()]
+
+ nexthop = lines[1].split()[0].strip()
+ info = [s.strip(',') for s in lines[2].split()]
+ attrs = []
+ med = info[info.index('metric') + 1]
+ attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_
DISC,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ 'metric': int(med)})
+ localpref = info[info.index('localpref') + 1]
+ attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
+ 'value': int(localpref)})
+
+ rib.append({'prefix': prefix, 'nexthop': nexthop,
+ 'aspath': aspath, 'attrs': attrs})
+
+ return rib
+
+ raise Exception('not found peer
{0}'.format(peer.router_id))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
+
+ info = [l.strip() for l in self.vtysh(
+ 'show bgp neighbors {0}'.format(neigh_addr),
+ config=False).split('\n')]
+
+ raise Exception('unknown format')
+
+ idx1 = info[0].index('BGP neighbor is ')
+ idx2 = info[0].index(',')
+ n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
+ idx1 = info[2].index('= ')
+ state = info[2][idx1 + len('= '):]
+ return base.BGP_FSM_IDLE
+ return base.BGP_FSM_ACTIVE
+ return base.BGP_FSM_ESTABLISHED
+ return state
+
+ raise Exception('not found peer
{0}'.format(peer.router_id))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ self.vtysh('clear ip bgp * soft', config=False)
+
+ zebra = 'no'
+ self._create_config_bgp()
+ zebra = 'yes'
+ self._create_config_zebra()
+ self._create_config_daemons(zebra)
+
+ c = base.CmdBuffer()
+ c << 'vtysh_enable=yes'
+ c << 'zebra_options=" --daemon -A 127.0.0.1"'
+ c << 'bgpd_options=" --daemon -A 127.0.0.1"'
+ c << 'ospfd_options=" --daemon -A 127.0.0.1"'
+ c << 'ospf6d_options=" --daemon -A ::1"'
+ c << 'ripd_options=" --daemon -A 127.0.0.1"'
+ c << 'ripngd_options=" --daemon -A ::1"'
+ c << 'isisd_options=" --daemon -A 127.0.0.1"'
+ c << 'babeld_options=" --daemon -A 127.0.0.1"'
+ c << 'watchquagga_enable=yes'
+ c << 'watchquagga_options=(--daemon)'
+ with open('{0}/debian.conf'.format(self.config_dir),
'w') as
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'zebra=%s' % zebra
+ c << 'bgpd=yes'
+ c << 'ospfd=no'
+ c << 'ospf6d=no'
+ c << 'ripd=no'
+ c << 'ripngd=no'
+ c << 'isisd=no'
+ c << 'babeld=no'
+ with open('{0}/daemons'.format(self.config_dir), 'w') as
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+
+ c = base.CmdBuffer()
+ c << 'hostname bgpd'
+ c << 'password zebra'
+ c << 'router bgp {0}'.format(self.asn)
+ c << 'bgp router-id {0}'.format(self.router_id)
+ if any(info['graceful_restart'] for info in
+ c << 'bgp graceful-restart'
+
+ version = 4
+ version = netaddr.IPNetwork(info['neigh_
addr']).version
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ n_addr = info['neigh_addr'].split('/')[0]
+ c << 'no bgp default ipv4-unicast'
+
+ c << 'neighbor {0} remote-as {1}'.format(n_addr,
peer.asn)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ c << 'neighbor {0} route-server-client'.format(n_
addr)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ c << 'neighbor {0} route-map {1}
{2}'.format(n_addr,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
p['name'],
+ typ)
+ c << 'neighbor {0} password {1}'.format(n_addr,
info['passwd'])
+ c << 'neighbor {0} passive'.format(n_addr)
+ c << 'address-family ipv6 unicast'
+ c << 'neighbor {0} activate'.format(n_addr)
+ c << 'exit-address-family'
+
+ c << 'network {0}'.format(route['prefix'])
+ c << 'address-family ipv6 unicast'
+ c << 'network {0}'.format(route['prefix'])
+ c << 'exit-address-family'
+ raise Exception(
{0}'.format(route['rf']))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ c << 'address-family ipv6 unicast'
+ c << 'redistribute connected'
+ c << 'exit-address-family'
+ c << 'redistribute connected'
+
+ c << 'access-list {0} {1} {2}'.format(name,
policy['type'],
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ policy['match'])
+ c << 'route-map {0} permit 10'.format(name)
+ c << 'match ip address {0}'.format(name)
+ c << 'set metric {0}'.format(policy['med'])
+
+ c << 'debug bgp as4'
+ c << 'debug bgp fsm'
+ c << 'debug bgp updates'
+ c << 'debug bgp events'
+ c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
+
+ with open('{0}/bgpd.conf'.format(self.config_dir), 'w')
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'hostname zebra'
+ c << 'password zebra'
+ c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
+ c << 'debug zebra packet'
+ c << 'debug zebra kernel'
+ c << 'debug zebra rib'
+ c << ''
+
+ with open('{0}/zebra.conf'.format(self.config_dir), 'w')
as
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ cmd = [cmd]
+ cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
+ return self.exec_on_ctn(
+ "vtysh -d bgpd -c 'en' -c 'conf t' -c "
+ "'router bgp {0}' {1}".format(self.asn, cmd),
+ capture=True)
+ return self.exec_on_ctn("vtysh -d bgpd
{0}".format(cmd),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ capture=True)
+
+ daemon = []
+ daemon.append('bgpd')
+ daemon.append('zebra')
+ cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
+ self.exec_on_ctn(cmd, capture=True)
+
+
+ def __init__(self, name, config, ctn_image_name,
+ asn = None
+ router_id = None
+ line = line.strip()
+ asn = int(line[len('router bgp'):].strip())
+ router_id = line[len('bgp router-id'):].strip()
+ raise Exception('asn not in quagga config')
+ raise Exception('router-id not in quagga config')
+ self.config = config
+ super(RawQuaggaBGPContainer, self).__init__(name, asn,
router_id,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
ctn_image_name,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
zebra)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ with open(os.path.join(self.config_dir, 'bgpd.conf'),
'w')
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(self.config)
+ f.writelines(self.config)
diff --git a/ryu/lib/docker/ryubgp.py b/ryu/lib/docker/ryubgp.py
new file mode 100644
index 0000000..8fe16f4
--- /dev/null
+++ b/ryu/lib/docker/ryubgp.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
software
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+# distributed under the License is distributed on an "AS IS"
BASIS,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions
and
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+import time
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
+
+ WAIT_FOR_BOOT = 1
+ SHARED_VOLUME = '/etc/ryu'
+
+ super(RyuBGPContainer, self).__init__(name, asn,
router_id,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ ctn_image_name)
+ self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
+ self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME,
'ryu.conf')
+ self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME,
'bgp_conf.py')
+ self.shared_volumes.append((self.config_dir,
self.SHARED_VOLUME))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+
+ c = base.CmdBuffer()
+ c << '[DEFAULT]'
+ c << 'verbose=True'
+ c << 'log_file=/etc/ryu/manager.log'
+ LOG.info("[%s's new config]" % self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ c = base.CmdBuffer()
+ c << 'import os'
+ c << ''
+ c << 'BGP = {'
+ c << " 'local_as': %s," % str(self.asn)
+ c << " 'router_id': '%s'," % self.router_id
+ c << " 'neighbors': ["
+ c << " {"
+ n_addr = info['neigh_addr'].split('/')[0]
+ c << " 'address': '%s'," % n_addr
+ c << " 'remote_as': %s," % str(peer.asn)
+ c << " 'enable_ipv4': True,"
+ c << " 'enable_ipv6': True,"
+ c << " 'enable_vpnv4': True,"
+ c << " 'enable_vpnv6': True,"
+ c << ' },'
+ c << ' ],'
+ c << " 'routes': ["
+ c << " {"
+ c << " 'prefix': '%s'," % route['prefix']
+ c << " },"
+ c << " ],"
+ c << "}"
+ log_conf = """LOGGING = {
+
+ # We use python logging package for logging.
+ 'version': 1,
+ 'disable_existing_loggers': False,
+
+ 'formatters': {
+ 'verbose': {
+ 'format': '%(levelname)s %(asctime)s %(module)s ' +
+ '[%(process)d %(thread)d] %(message)s'
+ },
+ 'simple': {
+ 'format': '%(levelname)s %(asctime)s %(module)s
%(lineno)s ' +
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ '%(message)s'
+ },
+ 'stats': {
+ 'format': '%(message)s'
+ },
+ },
+
+ 'handlers': {
+ # Outputs log to console.
+ 'console': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'simple'
+ },
+ 'console_stats': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'stats'
+ },
+ # Rotates log file when its size reaches 10MB.
+ 'log_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'bgpspeaker.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'verbose'
+ },
+ 'stats_file': {
+ 'level': 'DEBUG',
+ 'class': 'logging.handlers.RotatingFileHandler',
+ 'filename': os.path.join('.', 'statistics_bgps.log'),
+ 'maxBytes': '10000000',
+ 'formatter': 'stats'
+ },
+ },
+
+ # Fine-grained control of logging per instance.
+ 'loggers': {
+ 'bgpspeaker': {
+ 'handlers': ['console', 'log_file'],
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'stats': {
+ 'handlers': ['stats_file', 'console_stats'],
+ 'level': 'INFO',
+ 'propagate': False,
+ 'formatter': 'stats',
+ },
+ },
+
+ # Root loggers.
+ 'root': {
+ 'handlers': ['console', 'log_file'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ },
+}"""
+ c << log_conf
+ with open(os.path.join(self.config_dir, 'bgp_conf.py'),
'w')
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
as
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ LOG.info("[%s's new config]", self.name)
+ LOG.info(str(c))
+ f.writelines(str(c))
+
+ self._create_config_ryu()
+ self._create_config_ryu_bgp()
+
+ results = self.exec_on_ctn('ps ax')
+ running = False
+ running = True
+ return running
+
+ return True
+ result = False
+ try_times = 3
+ try_times = 1
+ cmd = "ryu-manager --verbose "
+ cmd += "--config-file %s " % self.SHARED_RYU_CONF
+ cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
+ cmd += "ryu.services.protocols.bgp.application"
+ self.exec_on_ctn(cmd, detach=True)
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ return True
+ result = False
+ try_times = 3
+ try_times = 1
+ cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
+ self.exec_on_ctn(cmd)
+ result = True
+ break
+ time.sleep(1)
+ return result
+
+ w_time = super(RyuBGPContainer,
+ self).run(wait=wait,
w_time=self.WAIT_FOR_BOOT)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
+ return w_time
+
+ self.stop_ryubgp(retry=True)
+ self.start_ryubgp(retry=True)
diff --git a/ryu/tests/integrated/common/__init__.py
b/ryu/tests/integrated/common/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/ryu/tests/integrated/common/docker_base.py
b/ryu/tests/integrated/common/docker_base.py
deleted file mode 100644
index 1ae2cc2..0000000
--- a/ryu/tests/integrated/common/docker_base.py
+++ /dev/null
@@ -1,801 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/base.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
software
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
-# distributed under the License is distributed on an "AS IS"
BASIS,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions
and
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import itertools
-import logging
-import os
-import subprocess
-import time
-
-import netaddr
-import six
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_TEST_PREFIX = ''
-DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
-TEST_PREFIX = DEFAULT_TEST_PREFIX
-TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
-
-BGP_FSM_IDLE = 'BGP_FSM_IDLE'
-BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
-BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
-
-BGP_ATTR_TYPE_ORIGIN = 1
-BGP_ATTR_TYPE_AS_PATH = 2
-BGP_ATTR_TYPE_NEXT_HOP = 3
-BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
-BGP_ATTR_TYPE_LOCAL_PREF = 5
-BGP_ATTR_TYPE_COMMUNITIES = 8
-BGP_ATTR_TYPE_ORIGINATOR_ID = 9
-BGP_ATTR_TYPE_CLUSTER_LIST = 10
-BGP_ATTR_TYPE_MP_REACH_NLRI = 14
-BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
-
-BRIDGE_TYPE_DOCKER = 'docker'
-BRIDGE_TYPE_BRCTL = 'brctl'
-BRIDGE_TYPE_OVS = 'ovs'
-
-
- super(CommandError, self).__init__()
- self.out = out
-
-
- e = RuntimeError()
- r = f()
- time.sleep(s)
- return r
- raise e
-
-
- super(CmdBuffer, self).__init__()
- self.delim = delim
-
- self.append(value)
-
- return self.delim.join(self)
-
-
-
- def __new__(cls, stdout, stderr, command, returncode,
- stdout = stdout or ''
- obj = super(CommandOut, cls).__new__(cls, stdout,
**kwargs)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- obj.stderr = stderr or ''
- obj.command = command
- obj.returncode = returncode
- return obj
-
-
-
- """Execute a command using subprocess.Popen()
- - out: stdout from subprocess.Popen()
- out has some attributes.
- out.returncode: returncode of subprocess.Popen()
- out.stderr: stderr from subprocess.Popen()
- """
- p_stdout = subprocess.PIPE
- p_stderr = subprocess.PIPE
- p_stdout = None
- p_stderr = None
- pop = subprocess.Popen(cmd, shell=True,
executable=executable,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- stdout=p_stdout,
- stderr=p_stderr)
- __stdout, __stderr = pop.communicate()
- _stdout = six.text_type(__stdout, 'utf-8')
- _stderr = six.text_type(__stderr, 'utf-8')
- out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
- return out
-
- out = None
- out = self._execute(cmd, capture=capture)
- LOG.info(out.command)
- return out
- LOG.error("stdout: %s", out)
- LOG.error("stderr: %s", out.stderr)
- break
- time.sleep(interval)
- raise CommandError(out)
-
- cmd = 'sudo %s' % cmd
- return self.execute(cmd, capture=capture,
- try_times=try_times,
interval=interval)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
-
-
- self.baseimage = baseimage
- self.cmd = Command()
-
- out = self.cmd.sudo('sudo docker images')
- images = []
- images.append(line.split()[0])
- return images
-
- return name in self.get_images()
-
- self.cmd.sudo(
- "docker build -t {0} {1}".format(tagname,
dockerfile_dir),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- try_times=3)
-
- return tagname
- self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
-
- def create_quagga(self, tagname='quagga', image=None,
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- pkges = ' '.join([
- 'telnet',
- 'tcpdump',
- 'quagga',
- ])
- use_image = image
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'RUN apt-get update'
- c << 'RUN apt-get install -qy --no-install-recommends %s'
%
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
pkges
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- c << 'CMD /usr/lib/quagga/bgpd'
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
- self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c),
workdir))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- self.build(tagname, workdir)
- return tagname
-
- def create_ryu(self, tagname='ryu', image=None,
- return tagname
- workdir = os.path.join(TEST_BASE_DIR, tagname)
- workdir_ctn = '/root/osrg/ryu'
- pkges = ' '.join([
- 'tcpdump',
- 'iproute2',
- ])
- use_image = image
- use_image = self.baseimage
- c = CmdBuffer()
- c << 'FROM %s' % use_image
- c << 'ADD ryu %s' % workdir_ctn
- install = ' '.join([
- 'RUN apt-get update',
- '&& apt-get install -qy --no-install-recommends %s' %
pkges,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- '&& cd %s' % workdir_ctn,
- # Note: Clean previous builds, because "python
setup.py
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
install"
- # might fail if the current directory contains the
symlink to
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- # Docker host file systems.
- '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
- '&& pip install -r tools/pip-requires -r
tools/optional-requires',
- '&& python setup.py install',
- ])
- c << install
-
- self.cmd.sudo('rm -rf %s' % workdir)
- self.cmd.execute('mkdir -p %s' % workdir)
- self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c),
workdir))
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- self.cmd.execute('cp -r ../ryu %s/' % workdir)
- self.build(tagname, workdir)
- return tagname
-
-
- def __init__(self, name, subnet='', start_ip=None,
end_ip=None,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- with_ip=True, self_ip=False,
- fixed_ip=None, reuse=False,
- """Manage a bridge
- - name: bridge name
- - subnet: network cider to be used in this bridge
- - start_ip: start address of an ip to be used in the
subnet
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- - end_ip: end address of an ip to be used in the
subnet
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- - with_ip: specify if assign automatically an ip
address
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- - self_ip: specify if assign an ip address for the
bridge
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- - fixed_ip: an ip address to be assigned to the bridge
- - reuse: specify if use an existing bridge
- - br_type: One either in a 'docker', 'brctl' or 'ovs'
- """
- self.cmd = Command()
- self.name = name
- if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
- raise Exception("argument error br_type: %s" %
br_type)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- self.br_type = br_type
- self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
- self.name = '{0}_{1}'.format(TEST_PREFIX, name)
- self.with_ip = with_ip
- self.subnet = netaddr.IPNetwork(subnet)
- self.start_ip = start_ip
- self.start_ip = netaddr.IPAddress(self.subnet.
first)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- self.end_ip = end_ip
- self.end_ip = netaddr.IPAddress(self.subnet.last)
-
- for host in netaddr.IPRange(self.start_ip,
- yield host
- self._ip_generator = _ip_gen()
- # throw away first network address
- self.next_ip_address()
-
- self.self_ip = self_ip
- self.ip_addr = fixed_ip
- self.ip_addr = self.next_ip_address()
- gw = "--gateway %s" %
self.ip_addr.split('/')[0]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- v6 = ''
- v6 = '--ipv6'
- cmd = ("docker network create --driver bridge
%s "
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- "%s --subnet %s %s" % (v6, gw, subnet,
self.name))
- cmd = "ip link add {0} type bridge".format(
self.name)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- cmd = "ovs-vsctl add-br {0}".format(self.name
)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- raise ValueError('Unsupported br_type: %s' %
self.br_type)
- self.delete()
- self.execute(cmd, sudo=True, retry=True)
- try_several_times(f)
- self.execute("ip link set up dev {0}".format(
self.name),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- sudo=True, retry=True)
-
- ips = self.check_br_addr(self.name)
- self.execute(
- "ip addr del {0} dev {1}".format(ip,
self.name),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- sudo=True, retry=True)
- self.execute(
- "ip addr add {0} dev {1}".format(self.ip_addr,
self.name
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
),
- sudo=True, retry=True)
- self.ctns = []
-
- out = self.execute('docker network ls', sudo=True,
retry=True)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- bridges = []
- bridges.append(line.split()[1])
- return bridges
-
- out = self.execute('brctl show', retry=True)
- bridges = []
- bridges.append(line.split()[0])
- return bridges
-
- out = self.execute('ovs-vsctl list-br', sudo=True,
retry=True)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- return out.splitlines()
-
- return self.get_bridges_dc()
- return self.get_bridges_brctl()
- return self.get_bridges_ovs()
-
- return self.name in self.get_bridges()
-
- m = self.cmd.sudo
- m = self.cmd.execute
- return m(cmd, capture=capture, try_times=3,
interval=1)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- return m(cmd, capture=capture)
-
- ips = {}
- cmd = "ip a show dev %s" % br
- elems = [e.strip() for e in line.strip().split('
')]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- ips[4] = elems[1]
- elems = [e.strip() for e in line.strip().split('
')]
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- ips[6] = elems[1]
- return ips
-
- return "{0}/{1}".format(next(self._ip_generator),
- self.subnet.prefixlen)
-
- name = ctn.next_if_name()
- self.ctns.append(ctn)
- ip_address = None
- ipv4 = None
- ipv6 = None
- ip_address = self.next_ip_address()
- ip_address_ip = ip_address.split('/')[0]
- version = 4
- version = 6
- opt_ip = "--ip %s" % ip_address_ip
- ipv4 = ip_address
- opt_ip = "--ip6 %s" % ip_address_ip
- ipv6 = ip_address
- cmd = "docker network connect %s %s %s" % (
- opt_ip, self.name, ctn.docker_name())
- self.execute(cmd, sudo=True)
- ctn.set_addr_info(bridge=self.name, ipv4=ipv4,
ipv6=ipv6,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- ifname=name)
- ip_address = self.next_ip_address()
- version = 4
- version = 6
- ctn.pipework(self, ip_address, name,
version=version)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- ctn.pipework(self, '0/0', name)
- return ip_address
-
- return
- self.execute("docker network rm %s" % self.name,
- sudo=True, retry=True)
- self.execute("ip link set down dev %s" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ip link delete %s type bridge" % self.name,
- sudo=True, retry=True)
- self.execute(
- "ovs-vsctl del-br %s" % self.name,
- sudo=True, retry=True)
-
-
- self.name = name
- self.image = image
- self.shared_volumes = []
- self.ip_addrs = []
- self.ip6_addrs = []
- self.is_running = False
- self.eths = []
- self.id = None
-
- self.cmd = Command()
- self.remove()
-
- return self.name
- return '{0}_{1}'.format(TEST_PREFIX, self.name)
-
- return self.id
- return self.docker_name()
-
- name = 'eth{0}'.format(len(self.eths) + 1)
- self.eths.append(name)
- return name
-
- def set_addr_info(self, bridge, ipv4=None, ipv6=None,
- self.ip_addrs.append((ifname, ipv4, bridge))
- self.ip6_addrs.append((ifname, ipv6, bridge))
-
- addrinfo = {}
- ip_addrs = self.ip_addrs
- ip_addrs = self.ip6_addrs
- return None
- addrinfo[addr[1]] = addr[0]
- return addrinfo
-
- m = self.cmd.sudo
- m = self.cmd.execute
- return m(cmd, capture=capture, try_times=3,
interval=1)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- return m(cmd, capture=capture)
-
- return self.cmd.sudo(cmd, capture=capture,
try_times=3,
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
interval=1)
- return self.cmd.sudo(cmd, capture=capture)
-
- name = self.docker_name()
- flag = '-d' if detach else ''
- return self.dcexec('docker exec {0} {1} {2}'.format(
- flag, name, cmd), capture=capture)
-
- cmd = 'docker ps --no-trunc=true'
- cmd += ' --all=true'
- out = self.dcexec(cmd, retry=True)
- containers = []
- containers.append(line.split()[-1])
- return containers
-
- return self.docker_name() in self.get_containers(allctn=allctn)
-
- c = CmdBuffer(' ')
- c << "docker run --privileged=true"
- c << "-v {0}:{1}".format(sv[0], sv[1])
- c << "--name {0} --hostname {0} -id
{1}".format(self.docker_name()
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
,
-
self.image)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- self.id = self.dcexec(str(c), retry=True)
- self.is_running = True
- self.exec_on_ctn("ip li set up dev lo")
- ipv4 = None
- ipv6 = None
- for line in self.exec_on_ctn("ip a show dev
- elems = [e.strip() for e in line.strip().split(' ')]
- ipv4 = elems[1]
- elems = [e.strip() for e in line.strip().split(' ')]
- ipv6 = elems[1]
- self.set_addr_info(bridge='docker0', ipv4=ipv4, ipv6=ipv6,
- ifname='eth0')
- return 0
-
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- return
- ctn_id = self.get_docker_id()
- out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
- self.is_running = False
- return out
-
- LOG.warning('Call run() before pipeworking')
- return
- c = CmdBuffer(' ')
- c << "pipework {0}".format(bridge.name)
-
- c << "-i {0}".format(intf_name)
- intf_name = "eth1"
- ipv4 = None
- ipv6 = None
- ipv4 = ip_addr
- c << '-a 6'
- ipv6 = ip_addr
- c << "{0} {1}".format(self.docker_name(), ip_addr)
- self.set_addr_info(bridge=bridge.name, ipv4=ipv4, ipv6=ipv6,
- ifname=intf_name)
- self.execute(str(c), sudo=True, retry=True)
-
- cmd = "docker inspect -f '{{.State.Pid}}' %s" %
self.docker_name()
- return int(self.dcexec(cmd))
- return -1
-
- interface = "eth0"
- filename = "{0}/{1}.dump".format(
- self.shared_volumes[0][1], interface)
- self.exec_on_ctn(
- "tcpdump -i {0} -w {1}".format(interface, filename),
- detach=True)
-
-
-
- WAIT_FOR_BOOT = 1
- RETRY_INTERVAL = 5
- DEFAULT_PEER_ARG = {'neigh_addr': '',
- 'passwd': None,
- 'vpn': False,
- 'flowspec': False,
- 'is_rs_client': False,
- 'is_rr_client': False,
- 'cluster_id': None,
- 'policies': None,
- 'passive': False,
- 'local_addr': '',
- 'as2': False,
- 'graceful_restart': None,
- 'local_as': None,
- 'prefix_limit': None}
- default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
- DEFAULT_ROUTE_ARG = {'prefix': None,
- 'rf': 'ipv4',
- 'attr': None,
- 'next-hop': None,
- 'as-path': None,
- 'community': None,
- 'med': None,
- 'local-pref': None,
- 'extended-community': None,
- 'matchs': None,
- 'thens': None}
- default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
-
- self.config_dir = TEST_BASE_DIR
- self.config_dir = os.path.join(self.config_dir, TEST_PREFIX)
- self.config_dir = os.path.join(self.config_dir, name)
- self.asn = asn
- self.router_id = router_id
- self.peers = {}
- self.routes = {}
- self.policies = {}
- super(BGPContainer, self).__init__(name, ctn_image_name)
- self.execute(
- 'rm -rf {0}'.format(self.config_dir), sudo=True)
- self.execute('mkdir -p {0}'.format(self.config_dir))
- self.execute('chmod 777 {0}'.format(self.config_dir))
-
- return str({'name': self.name, 'asn': self.asn,
- 'router_id': self.router_id})
-
- self.create_config()
- super(BGPContainer, self).run()
- time.sleep(w_time)
- return w_time
-
- def add_peer(self, peer, bridge='', reload_config=True, v6=False,
- peer_info = peer_info or {}
- self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
- self.peers[peer].update(peer_info)
- peer_keys = sorted(self.peers[peer].keys())
- raise Exception("argument error peer_info: %s" %
peer_info)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
-
- neigh_addr = ''
- local_addr = ''
- it = itertools.product(self.ip_addrs, peer.ip_addrs)
- it = itertools.product(self.ip6_addrs, peer.ip6_addrs)
-
- continue
- neigh_addr = you[1]
- local_addr = me[1]
- addr, mask = local_addr.split('/')
- local_addr = "{0}%{1}/{2}".format(addr, me[0],
mask)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- break
-
- raise Exception('peer {0} seems not ip reachable'.format(peer))
-
- self.peers[peer]['policies'] = {}
-
- self.peers[peer]['neigh_addr'] = neigh_addr
- self.peers[peer]['local_addr'] = local_addr
- self.create_config()
- self.reload_config()
-
- del self.peers[peer]
- self.create_config()
- self.reload_config()
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- return self.execute('cat {0}/*.log'.format(self.config_dir))
-
- def add_route(self, route, reload_config=True,
- route_info = route_info or {}
- self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
- self.routes[route].update(route_info)
- route_keys = sorted(self.routes[route].keys())
- raise Exception("argument error route_info: %s" %
route_info)
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- self.routes[route]['prefix'] = route
- self.create_config()
- self.reload_config()
-
- def add_policy(self, policy, peer, typ, default='accept',
- self.set_default_policy(peer, typ, default)
- self.define_policy(policy)
- self.assign_policy(peer, policy, typ)
- self.create_config()
- self.reload_config()
-
- if (typ in ['in', 'out', 'import', 'export'] and
- self.peers[peer]['default-policy'] = {}
- self.peers[peer]['default-policy'][typ] = default
- raise Exception('wrong type or default')
-
- self.policies[policy['name']] = policy
-
- raise Exception('peer {0} not found'.format(peer.name))
- name = policy['name']
- raise Exception('policy {0} not found'.format(name))
- self.peers[peer]['policies'][typ] = policy
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- raise NotImplementedError()
-
- version = netaddr.IPNetwork(prefix).version
- addr = prefix.split('/')[0]
- ping_cmd = 'ping'
- ping_cmd = 'ping6'
- raise Exception(
- 'unsupported route family: {0}'.format(version))
- cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format(
- ping_cmd, addr)
- interval = 1
- count = 0
- res = self.exec_on_ctn(cmd)
- LOG.info(res)
- break
- time.sleep(interval)
- count += interval
- raise Exception('timeout')
- return True
-
- interval = 1
- count = 0
- state = self.get_neighbor_state(peer)
- LOG.info("%s's peer %s state: %s",
- self.router_id, peer.router_id, state)
- return
-
- time.sleep(interval)
- count += interval
- raise Exception('timeout')
-
- cmd = '/sbin/ip route add {0} via {1}'.format(network, next_hop)
- self.exec_on_ctn(cmd)
-
- cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
- self.exec_on_ctn(cmd)
-
- raise NotImplementedError()
-
- raise NotImplementedError()
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg.sh b/ryu/tests/integrated/common/install_docker_test_pkg.sh
deleted file mode 100644
index a771dfc..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-function add_docker_aptline {
-    sudo apt-get update
-    if ! apt-cache search docker-engine | grep docker-engine; then
-        VER=`lsb_release -r`
-        if echo $VER | grep 12.04; then
-            REL_NAME=precise
-        elif echo $VER | grep 14.04; then
-            REL_NAME=trusty
-        elif echo $VER | grep 15.10; then
-            REL_NAME=wily
-        elif echo $VER | grep 16.04; then
-            REL_NAME=xenial
-        else
-            return 1
-        fi
-        RELEASE=ubuntu-$REL_NAME
-        sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-        sudo sh -c "echo deb https://apt.dockerproject.org/repo $RELEASE main > /etc/apt/sources.list.d/docker.list"
-    fi
-}
-
-init_variables
-
-if [ $APTLINE_DOCKER -eq 1 ]; then
-    add_docker_aptline
-fi
-
-sudo apt-get update
-if apt-cache search docker-engine | grep docker-engine; then
-    DOCKER_PKG=docker-engine
-else
-    DOCKER_PKG=docker.io
-fi
-sudo apt-get install -y $DOCKER_PKG
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh b/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
deleted file mode 100644
index 44a3e10..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_common.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-set -ex
-
-function init_variables {
-    APTLINE_DOCKER=0
-    DIR_BASE=/tmp
-}
-
-function process_options {
-    local max
-    local i
-    max=$#
-    i=1
-    while [ $i -le $max ]; do
-        case "$1" in
-            -a|--add-docker-aptline)
-                APTLINE_DOCKER=1
-                ;;
-            -d|--download-dir)
-                shift; ((i++))
-                DIR_BASE=$1
-                ;;
-        esac
-        shift; ((i++))
-    done
-}
-
-function install_pipework {
-    if ! which /usr/local/bin/pipework >/dev/null
-    then
-        sudo rm -rf $DIR_BASE/pipework
-        git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework
-        sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework
-    fi
-}
-
-function install_depends_pkg {
-    install_pipework
-}
diff --git a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh b/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
deleted file mode 100644
index d8c3b49..0000000
--- a/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -ex
-
-RYU_PATH=`dirname $0`
-
-source ${RYU_PATH}/install_docker_test_pkg_common.sh
-
-init_variables
-
-sudo apt-get update
-install_depends_pkg
diff --git a/ryu/tests/integrated/common/quagga.py b/ryu/tests/integrated/common/quagga.py
deleted file mode 100644
index 9b6d218..0000000
--- a/ryu/tests/integrated/common/quagga.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
-#
-# This is based on the following
-# https://github.com/osrg/gobgp/test/lib/quagga.py
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-
-import netaddr
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/quagga'
-
- def __init__(self, name, asn, router_id, ctn_image_name,
- super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
- self.zebra = zebra
- self._create_config_debian()
-
- w_time = super(QuaggaBGPContainer,
- self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- rib = []
- return self.get_global_rib_with_prefix(prefix, rf)
-
- out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
- return rib
-
- read_next = False
-
- ibgp = False
- line = line[2:]
- line = line[1:]
- ibgp = True
- continue
-
- elems = line.split()
-
- read_next = True
- prefix = elems[0]
- continue
- nexthop = elems[0]
- prefix = elems[0]
- nexthop = elems[1]
- read_next = False
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'ibgp': ibgp})
-
- return rib
-
- rib = []
-
- lines = [line.strip() for line in self.vtysh(
- 'show bgp {0} unicast {1}'.format(rf, prefix),
- config=False).split('\n')]
-
- return rib
-
- lines = lines[2:]
-
- lines.pop(0) # another useless line
- elif lines[0].startswith('Advertised to non peer-group
- lines = lines[2:] # other useless lines
- raise Exception('unknown output format {0}'.format(lines))
-
- aspath = []
- aspath = [int(asn) for asn in lines[0].split()]
-
- nexthop = lines[1].split()[0].strip()
- info = [s.strip(',') for s in lines[2].split()]
- attrs = []
- med = info[info.index('metric') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
- 'metric': int(med)})
- localpref = info[info.index('localpref') + 1]
- attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
- 'value': int(localpref)})
-
- rib.append({'prefix': prefix, 'nexthop': nexthop,
- 'aspath': aspath, 'attrs': attrs})
-
- return rib
-
- raise Exception('not found peer {0}'.format(peer.router_id))
-
- neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
-
- info = [l.strip() for l in self.vtysh(
- 'show bgp neighbors {0}'.format(neigh_addr),
- config=False).split('\n')]
-
- raise Exception('unknown format')
-
- idx1 = info[0].index('BGP neighbor is ')
- idx2 = info[0].index(',')
- n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
- idx1 = info[2].index('= ')
- state = info[2][idx1 + len('= '):]
- return base.BGP_FSM_IDLE
- return base.BGP_FSM_ACTIVE
- return base.BGP_FSM_ESTABLISHED
- return state
-
- raise Exception('not found peer {0}'.format(peer.router_id))
-
- self.vtysh('clear ip bgp * soft', config=False)
-
- zebra = 'no'
- self._create_config_bgp()
- zebra = 'yes'
- self._create_config_zebra()
- self._create_config_daemons(zebra)
-
- c = base.CmdBuffer()
- c << 'vtysh_enable=yes'
- c << 'zebra_options=" --daemon -A 127.0.0.1"'
- c << 'bgpd_options=" --daemon -A 127.0.0.1"'
- c << 'ospfd_options=" --daemon -A 127.0.0.1"'
- c << 'ospf6d_options=" --daemon -A ::1"'
- c << 'ripd_options=" --daemon -A 127.0.0.1"'
- c << 'ripngd_options=" --daemon -A ::1"'
- c << 'isisd_options=" --daemon -A 127.0.0.1"'
- c << 'babeld_options=" --daemon -A 127.0.0.1"'
- c << 'watchquagga_enable=yes'
- c << 'watchquagga_options=(--daemon)'
- with open('{0}/debian.conf'.format(self.config_dir), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'zebra=%s' % zebra
- c << 'bgpd=yes'
- c << 'ospfd=no'
- c << 'ospf6d=no'
- c << 'ripd=no'
- c << 'ripngd=no'
- c << 'isisd=no'
- c << 'babeld=no'
- with open('{0}/daemons'.format(self.config_dir), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
-
- c = base.CmdBuffer()
- c << 'hostname bgpd'
- c << 'password zebra'
- c << 'router bgp {0}'.format(self.asn)
- c << 'bgp router-id {0}'.format(self.router_id)
- if any(info['graceful_restart'] for info in
- c << 'bgp graceful-restart'
-
- version = 4
- version = netaddr.IPNetwork(info['neigh_addr']).version
- n_addr = info['neigh_addr'].split('/')[0]
- c << 'no bgp default ipv4-unicast'
-
- c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
- c << 'neighbor {0} route-server-client'.format(n_addr)
- c << 'neighbor {0} route-map {1} {2}'.format(n_addr,
- p['name'],
- typ)
- c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
- c << 'neighbor {0} passive'.format(n_addr)
- c << 'address-family ipv6 unicast'
- c << 'neighbor {0} activate'.format(n_addr)
- c << 'exit-address-family'
-
- c << 'network {0}'.format(route['prefix'])
- c << 'address-family ipv6 unicast'
- c << 'network {0}'.format(route['prefix'])
- c << 'exit-address-family'
- raise Exception(
- 'unsupported route family: {0}'.format(route['rf']))
-
- c << 'address-family ipv6 unicast'
- c << 'redistribute connected'
- c << 'exit-address-family'
- c << 'redistribute connected'
-
- c << 'access-list {0} {1} {2}'.format(name, policy['type'],
- policy['match'])
- c << 'route-map {0} permit 10'.format(name)
- c << 'match ip address {0}'.format(name)
- c << 'set metric {0}'.format(policy['med'])
-
- c << 'debug bgp as4'
- c << 'debug bgp fsm'
- c << 'debug bgp updates'
- c << 'debug bgp events'
- c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
-
- with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'hostname zebra'
- c << 'password zebra'
- c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
- c << 'debug zebra packet'
- c << 'debug zebra kernel'
- c << 'debug zebra rib'
- c << ''
-
- with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- cmd = [cmd]
- cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
- return self.exec_on_ctn(
- "vtysh -d bgpd -c 'en' -c 'conf t' -c "
- "'router bgp {0}' {1}".format(self.asn, cmd),
- capture=True)
- return self.exec_on_ctn("vtysh -d bgpd
{0}".format(cmd),
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
Post by Takashi YAMAMOTO
Post by fumihiko kakuma
- capture=True)
-
- daemon = []
- daemon.append('bgpd')
- daemon.append('zebra')
- cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
- self.exec_on_ctn(cmd, capture=True)
-
-
- def __init__(self, name, config, ctn_image_name,
- asn = None
- router_id = None
- line = line.strip()
- asn = int(line[len('router bgp'):].strip())
- router_id = line[len('bgp router-id'):].strip()
- raise Exception('asn not in quagga config')
- raise Exception('router-id not in quagga config')
- self.config = config
- super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name,
- zebra)
-
- with open(os.path.join(self.config_dir, 'bgpd.conf'), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(self.config)
- f.writelines(self.config)
diff --git a/ryu/tests/integrated/common/ryubgp.py b/ryu/tests/integrated/common/ryubgp.py
deleted file mode 100644
index 8fe16f4..0000000
--- a/ryu/tests/integrated/common/ryubgp.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import logging
-import os
-import time
-
-from . import docker_base as base
-
-LOG = logging.getLogger(__name__)
-
-
-
- WAIT_FOR_BOOT = 1
- SHARED_VOLUME = '/etc/ryu'
-
- super(RyuBGPContainer, self).__init__(name, asn, router_id,
- ctn_image_name)
- self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
- self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
- self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
- self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
-
- c = base.CmdBuffer()
- c << '[DEFAULT]'
- c << 'verbose=True'
- c << 'log_file=/etc/ryu/manager.log'
- LOG.info("[%s's new config]" % self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- c = base.CmdBuffer()
- c << 'import os'
- c << ''
- c << 'BGP = {'
- c << " 'local_as': %s," % str(self.asn)
- c << " 'router_id': '%s'," % self.router_id
- c << " 'neighbors': ["
- c << " {"
- n_addr = info['neigh_addr'].split('/')[0]
- c << " 'address': '%s'," % n_addr
- c << " 'remote_as': %s," % str(peer.asn)
- c << " 'enable_ipv4': True,"
- c << " 'enable_ipv6': True,"
- c << " 'enable_vpnv4': True,"
- c << " 'enable_vpnv6': True,"
- c << ' },'
- c << ' ],'
- c << " 'routes': ["
- c << " {"
- c << " 'prefix': '%s'," % route['prefix']
- c << " },"
- c << " ],"
- c << "}"
- log_conf = """LOGGING = {
-
- # We use python logging package for logging.
- 'version': 1,
- 'disable_existing_loggers': False,
-
- 'formatters': {
- 'verbose': {
- 'format': '%(levelname)s %(asctime)s %(module)s ' +
- '[%(process)d %(thread)d] %(message)s'
- },
- 'simple': {
- 'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
- '%(message)s'
- },
- 'stats': {
- 'format': '%(message)s'
- },
- },
-
- 'handlers': {
- # Outputs log to console.
- 'console': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'simple'
- },
- 'console_stats': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'stats'
- },
- # Rotates log file when its size reaches 10MB.
- 'log_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'bgpspeaker.log'),
- 'maxBytes': '10000000',
- 'formatter': 'verbose'
- },
- 'stats_file': {
- 'level': 'DEBUG',
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join('.', 'statistics_bgps.log'),
- 'maxBytes': '10000000',
- 'formatter': 'stats'
- },
- },
-
- # Fine-grained control of logging per instance.
- 'loggers': {
- 'bgpspeaker': {
- 'handlers': ['console', 'log_file'],
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'stats': {
- 'handlers': ['stats_file', 'console_stats'],
- 'level': 'INFO',
- 'propagate': False,
- 'formatter': 'stats',
- },
- },
-
- # Root loggers.
- 'root': {
- 'handlers': ['console', 'log_file'],
- 'level': 'DEBUG',
- 'propagate': True,
- },
-}"""
- c << log_conf
- with open(os.path.join(self.config_dir, 'bgp_conf.py'), 'w') as f:
- LOG.info("[%s's new config]", self.name)
- LOG.info(str(c))
- f.writelines(str(c))
-
- self._create_config_ryu()
- self._create_config_ryu_bgp()
-
- results = self.exec_on_ctn('ps ax')
- running = False
- running = True
- return running
-
- return True
- result = False
- try_times = 3
- try_times = 1
- cmd = "ryu-manager --verbose "
- cmd += "--config-file %s " % self.SHARED_RYU_CONF
- cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
- cmd += "ryu.services.protocols.bgp.application"
- self.exec_on_ctn(cmd, detach=True)
- result = True
- break
- time.sleep(1)
- return result
-
- return True
- result = False
- try_times = 3
- try_times = 1
- cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
- self.exec_on_ctn(cmd)
- result = True
- break
- time.sleep(1)
- return result
-
- w_time = super(RyuBGPContainer,
- self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
- return w_time
-
- self.stop_ryubgp(retry=True)
- self.start_ryubgp(retry=True)
diff --git a/tests/integrated/bgp/base.py b/tests/integrated/bgp/base.py
index 26fa396..2f210de 100644
--- a/tests/integrated/bgp/base.py
+++ b/tests/integrated/bgp/base.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest
-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga
LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/base_ip6.py b/tests/integrated/bgp/base_ip6.py
index be26faf..d867920 100644
--- a/tests/integrated/bgp/base_ip6.py
+++ b/tests/integrated/bgp/base_ip6.py
@@ -20,9 +20,9 @@ import logging
import sys
import unittest
-from ryu.tests.integrated.common import docker_base as ctn_base
-from ryu.tests.integrated.common import ryubgp
-from ryu.tests.integrated.common import quagga
+from ryu.lib.docker import docker_base as ctn_base
+from ryu.lib.docker import ryubgp
+from ryu.lib.docker import quagga
LOG = logging.getLogger(__name__)
diff --git a/tests/integrated/bgp/test_basic.py b/tests/integrated/bgp/test_basic.py
index 5817d44..d1eda39 100644
--- a/tests/integrated/bgp/test_basic.py
+++ b/tests/integrated/bgp/test_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import time
-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base
from . import base
diff --git a/tests/integrated/bgp/test_ip6_basic.py b/tests/integrated/bgp/test_ip6_basic.py
index 40461a5..911a0b5 100644
--- a/tests/integrated/bgp/test_ip6_basic.py
+++ b/tests/integrated/bgp/test_ip6_basic.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
import time
-from ryu.tests.integrated.common import docker_base as ctn_base
+from ryu.lib.docker import docker_base as ctn_base
from . import base_ip6 as base
--
2.7.4
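
As a usage note for anyone importing these helpers from outside the test
tree: only the import path changes, the class names stay as they are. A
minimal sketch against the new location (the image names, ASNs and router
IDs below are made-up placeholders, and I am assuming the constructors keep
the (name, asn, router_id, ctn_image_name) signature visible in the diff):

    from ryu.lib.docker import docker_base as ctn_base
    from ryu.lib.docker import quagga
    from ryu.lib.docker import ryubgp

    # Placeholder images and identifiers, purely illustrative.
    q1 = quagga.QuaggaBGPContainer(name='q1', asn=65001,
                                   router_id='192.168.0.1',
                                   ctn_image_name='quagga-image')
    r1 = ryubgp.RyuBGPContainer(name='r1', asn=65000,
                                router_id='192.168.0.2',
                                ctn_image_name='ryu-image')

    # wait=True sleeps for WAIT_FOR_BOOT seconds before returning,
    # as the run() overrides above do.
    q1.run(wait=True)
    r1.run(wait=True)

    # The BGP FSM state constants are still exported by docker_base.
    established = ctn_base.BGP_FSM_ESTABLISHED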
--
fumihiko kakuma <***@valinux.co.jp>