The following patches for the ovirt server / viewer implement an ovirt-specific vnc proxy solution in which the client (viewer) sends the name of the vm to which it wants to connect before the connection is actually established and maintained. Thus the ovirt network will only need to expose one public port for vnc access
Mohammed Morsi
2009-May-07 12:49 UTC
[Ovirt-devel] [PATCH server] added ovirt vnc proxy server, to proxy vnc request to managed vms
run on startup by default like the other ovirt services --- conf/ovirt-vnc-proxy | 49 ++++++++++++++ installer/modules/ovirt/manifests/ovirt.pp | 1 + ovirt-server.spec.in | 5 ++ src/vnc-proxy/vnc-proxy.rb | 94 ++++++++++++++++++++++++++++ 4 files changed, 149 insertions(+), 0 deletions(-) create mode 100755 conf/ovirt-vnc-proxy create mode 100644 src/vnc-proxy/vnc-proxy.rb diff --git a/conf/ovirt-vnc-proxy b/conf/ovirt-vnc-proxy new file mode 100755 index 0000000..b3f565f --- /dev/null +++ b/conf/ovirt-vnc-proxy @@ -0,0 +1,49 @@ +#!/bin/bash +# +# +# ovirt-vnc-proxy startup script for ovirt-vnc-proxy +# +# chkconfig: - 97 03 +# description: ovirt-vnc-proxy proxies vnc requests to ovirt +# managed vms. +# + +DAEMON=/usr/share/ovirt-server/vnc-proxy/vnc-proxy.rb + +. /etc/init.d/functions + +start() { + echo -n "Starting ovirt-vnc-proxy: " + daemon $DAEMON + RETVAL=$? + echo +} + +stop() { + echo -n "Shutting down ovirt-vnc-proxy: " + killproc vnc-proxy.rb + RETVAL=$? + echo +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart) + stop + start + ;; + status) + status $DAEMON + RETVAL=$? 
+ ;; + *) + echo "Usage: ovirt-vnc-proxy {start|stop|restart|status}" + exit 1 + ;; +esac +exit $RETVAL diff --git a/installer/modules/ovirt/manifests/ovirt.pp b/installer/modules/ovirt/manifests/ovirt.pp index 09b1925..f7df804 100644 --- a/installer/modules/ovirt/manifests/ovirt.pp +++ b/installer/modules/ovirt/manifests/ovirt.pp @@ -196,6 +196,7 @@ class ovirt::setup { firewall_rule{"http": destination_port => "80"} firewall_rule {"https": destination_port => '443'} firewall_rule {"host-browser": destination_port => '12120'} + firewall_rule {"vnc-proxy": destination_port => '5500'} firewall_rule {"qpidd": destination_port => '5672'} firewall_rule {"collectd": destination_port => '25826', protocol => 'udp'} firewall_rule {"ntpd": destination_port => '123', protocol => 'udp'} diff --git a/ovirt-server.spec.in b/ovirt-server.spec.in index 6da7297..683f214 100644 --- a/ovirt-server.spec.in +++ b/ovirt-server.spec.in @@ -104,6 +104,7 @@ touch %{buildroot}%{_localstatedir}/log/%{name}/db-omatic.log %{__install} -Dp -m0755 %{pbuild}/conf/ovirt-mongrel-rails.sysconf %{buildroot}%{_sysconfdir}/sysconfig/ovirt-mongrel-rails %{__install} -Dp -m0755 %{pbuild}/conf/ovirt-rails.sysconf %{buildroot}%{_sysconfdir}/sysconfig/ovirt-rails %{__install} -Dp -m0755 %{pbuild}/conf/ovirt-taskomatic %{buildroot}%{_initrddir} +%{__install} -Dp -m0755 %{pbuild}/conf/ovirt-vnc-proxy %{buildroot}%{_initrddir} # copy over all of the src directory... 
%{__cp} -a %{pbuild}/src/* %{buildroot}%{app_root} @@ -179,6 +180,7 @@ fi %daemon_chkconfig_post -d ovirt-host-collect %daemon_chkconfig_post -d ovirt-mongrel-rails %daemon_chkconfig_post -d ovirt-taskomatic +%daemon_chkconfig_post -d ovirt-vnc-proxy %preun if [ "$1" = 0 ] ; then @@ -187,11 +189,13 @@ if [ "$1" = 0 ] ; then /sbin/service ovirt-host-collect stop > /dev/null 2>&1 /sbin/service ovirt-mongrel-rails stop > /dev/null 2>&1 /sbin/service ovirt-taskomatic stop > /dev/null 2>&1 + /sbin/service ovirt-vnc-proxy stop > /dev/null 2>&1 /sbin/chkconfig --del ovirt-host-browser /sbin/chkconfig --del ovirt-db-omatic /sbin/chkconfig --del ovirt-host-collect /sbin/chkconfig --del ovirt-mongrel-rails /sbin/chkconfig --del ovirt-taskomatic + /sbin/chkconfig --del ovirt-vnc-proxy fi %files @@ -204,6 +208,7 @@ fi %{_initrddir}/ovirt-host-collect %{_initrddir}/ovirt-mongrel-rails %{_initrddir}/ovirt-taskomatic +%{_initrddir}/ovirt-vnc-proxy %{_sysconfdir}/cron.d/%{name} %config(noreplace) %{_sysconfdir}/sysconfig/ovirt-mongrel-rails %config(noreplace) %{_sysconfdir}/sysconfig/ovirt-rails diff --git a/src/vnc-proxy/vnc-proxy.rb b/src/vnc-proxy/vnc-proxy.rb new file mode 100644 index 0000000..b20bb1f --- /dev/null +++ b/src/vnc-proxy/vnc-proxy.rb @@ -0,0 +1,94 @@ +#!/usr/bin/ruby +# +# vnc-proxy.rb +# ovirt vnc proxy server, relays ovirt encoded +# vnc requests to correct node +# Copyright (C) 2008 Red Hat, Inc. +# Written by Mohammed Morsi <mmorsi at redhat.com> +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. A copy of the GNU General Public License is +# also available at http://www.gnu.org/copyleft/gpl.html. + +$: << File.join(File.dirname(__FILE__), "../dutils") + +require 'dutils' +require 'daemons' +include Daemonize + +########### +OVIRT_SERVER_PORT = 5500 +VM_NAME_MAX_LEN = 250 +VNC_DATA_MAX_LEN = 800000 + +########### + +$debug = true + +def DEBUG(msg) + puts msg if $debug +end + +$verbose = true +def VERBOSE(msg) + puts msg if $verbose +end + +########### + +# TODO catch errors + +daemonize + +server = TCPServer.open(OVIRT_SERVER_PORT) + +while(true) do + Thread.start(server.accept) do |client| + DEBUG "client accepted" + + # first msg will be the vm description + vm_description = client.recv(VM_NAME_MAX_LEN).to_s + DEBUG "vm received: " + vm_description + ";" + + # lookup vm + vm = Vm.find(:first, :conditions => [ "description = ?", vm_description ]) + unless vm.nil? || vm.host.nil? || vm.state != "running" + # connect to node + DEBUG "connecting to node " + vm.host.hostname + ":" + vm.vnc_port.to_s + node_socket = TCPSocket.open(vm.host.hostname, vm.vnc_port) + + # begin new thread to process server->client messages + Thread.start do + DEBUG "listening for server->client data" + while(true)do + node_data = node_socket.recv VNC_DATA_MAX_LEN + client.write node_data + VERBOSE "server -> client data " + node_data.size.to_s + end + end + + # process client -> server messages + DEBUG "listening for client->server data" + while(true) do + client_data = client.recv VNC_DATA_MAX_LEN + node_socket.write client_data + VERBOSE "client -> server data " + client_data.size.to_s + end + + node_socket.close + end + + client.close + end +end -- 1.6.0.6