Skip to content

Commit

Permalink
Add client class
Browse files Browse the repository at this point in the history
Related-Issue: #6
  • Loading branch information
travelist committed Jun 1, 2015
1 parent dea77c0 commit 0218ada
Show file tree
Hide file tree
Showing 19 changed files with 457 additions and 26 deletions.
37 changes: 36 additions & 1 deletion surge/client.py
Original file line number Diff line number Diff line change
@@ -1 +1,36 @@
__author__ = 'kshimamu'
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from surge.surge_deployer.surge import VagrantDeployer
import surge.surge_deployer.utils as surge_utils


class SurgeClient:
    """Client facade for launching and tearing down Surge pipelines.

    Wraps :class:`VagrantDeployer` and the ``surge_utils`` component
    generators so callers can manage Kafka/Storm pipelines without
    touching the deployment layer directly.
    """

    # Provider passed to Vagrant when none is specified by the caller.
    DEFAULT_PROVIDER = "virtualbox"

    def launch_kafka(self, pipeline_name, brokers, provider=DEFAULT_PROVIDER):
        """Generate a Kafka component and deploy it as a new pipeline.

        :param pipeline_name: unique name identifying the pipeline
        :param brokers: number of Kafka broker nodes to provision
        :param provider: Vagrant provider to deploy with
            (defaults to ``"virtualbox"``)
        """
        deployer = VagrantDeployer(pipeline_name)
        # The component config must exist before Vagrant provisions the VMs.
        surge_utils.generate_kafka_component(pipeline_name, brokers)
        deployer.deploy(provider)

    def launch_storm(self, pipeline_name, supervisors, provider=DEFAULT_PROVIDER):
        """Generate a Storm component and deploy it as a new pipeline.

        :param pipeline_name: unique name identifying the pipeline
        :param supervisors: number of Storm supervisor nodes to provision
        :param provider: Vagrant provider to deploy with
            (defaults to ``"virtualbox"``)
        """
        deployer = VagrantDeployer(pipeline_name)
        surge_utils.generate_storm_component(pipeline_name, supervisors)
        deployer.deploy(provider)

    def list_pipeline(self):
        """Return the pipelines known to surge_utils.

        :returns: whatever ``surge_utils.list_pipelines()`` yields
            (presumably a list of pipeline names — confirm against
            ``surge.surge_deployer.utils``).
        """
        return surge_utils.list_pipelines()

    def destroy_pipeline(self, pipeline_name):
        """Destroy the Vagrant resources backing *pipeline_name*.

        :param pipeline_name: name of the pipeline to tear down
        """
        deployer = VagrantDeployer(pipeline_name)
        deployer.destroy()
2 changes: 0 additions & 2 deletions surge/surge_deployer/basevb/Vagrantfile
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@


# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
Expand Down
2 changes: 0 additions & 2 deletions surge/surge_deployer/basevb/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@


# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
Expand Down
2 changes: 0 additions & 2 deletions surge/surge_deployer/basevb/ansible.cfg
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@


# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
Expand Down
16 changes: 16 additions & 0 deletions surge/surge_deployer/basevb/pipeline.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
hosts:
kafka:
count: 1
provider:
virtualbox:
memory: 2048
zookeeper:
count: 1
provider:
virtualbox:
memory: 1024
provider:
type:
virtualbox:
hostname_prefix: ''
ip_start: 10.20.30.10
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@


# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
Expand All @@ -23,4 +21,4 @@ kafka_gid: 51521
kafka_logs_dir: /var/log/kafka
kafka_port: 9092
kafka_interface: eth1

include: kafka.yml
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@


# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
Expand Down Expand Up @@ -57,8 +55,17 @@
shell: sed 's/[^0-9]//g' /etc/hostname
register: id

#- name: Updating the server.properties conf
# template: src=server.properties.j2 dest=/usr/local/etc/kafka/config/server.properties owner={{ kafka_user }} group={{ kafka_group }} mode=0644

- name: Updating the server.properties conf
template: src=server.properties.j2 dest=/usr/local/etc/kafka/config/server.properties owner={{ kafka_user }} group={{ kafka_group }} mode=0644
when: "'zookeeper' in {{ groups }}"

- name: Updating server.properties conf for the added kafka node
template: src=add.server.properties.j2 dest=/usr/local/etc/kafka/config/server.properties owner={{ kafka_user }} group={{ kafka_group }} mode=0644
when: "'zookeeper' not in {{ groups }}"


#- name: Restarting Kafka
# supervisorctl: name=kafka state=restarted
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
broker.id={{ id.stdout }}

############################# Socket Server Settings #############################

# The port the socket server listens on
port={{kafka_port}}

# Hostname the broker will bind to. If not set, the server will bind to all interfaces

host.name={{ ansible_ssh_host }}

# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured. Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
#advertised.host.name=<hostname routable by clients>

# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
#advertised.port=<port accessible by clients>

# The number of threads handling network requests

num.network.threads=2

# The number of threads doing disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=1048576

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=1048576

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600


############################# Log Basics #############################

# A comma separated list of directories under which to store log files
log.dirs={{ kafka_logs_dir }}/kafka-logs

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=2

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion
log.retention.hours=168

# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=536870912

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=60000

# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
log.cleaner.enable=false

############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.

zookeeper.connect={{ zookeeper }}

# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=1000000
Empty file.
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@


# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
Expand Down Expand Up @@ -49,5 +47,13 @@
- name: Creating the logs dir
file: path=/var/log/storm owner=storm group=storm mode=0750 state=directory

#- name: Updating the storm.yaml conf
# template: src=storm.yaml.j2 dest=/usr/local/etc/storm/conf/storm.yaml owner=storm group=storm mode=0644

- name: Updating the storm.yaml conf
template: src=storm.yaml.j2 dest=/usr/local/etc/storm/conf/storm.yaml owner=storm group=storm mode=0644
template: src=storm.yaml.j2 dest=/usr/local/etc/storm/conf/storm.yaml owner=storm group=storm mode=0644
when: "'zookeeper' in {{ groups }}"

- name: Adding storm.yml config for additional server
template: src=supervisor.yaml.j2 dest=/usr/local/etc/storm/conf/storm.yaml owner=storm group=storm mode=0644
when: "'zookeeper' not in {{ groups }}"
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
storm.local.dir: "/usr/local/etc/storm"

storm.zookeeper.servers:
- "{{ zookeeper }}"

nimbus.host: "{{ storm_nimbus1 }}"

nimbus.childopts: "-Xmx1024m -Djava.net.preferIPv4Stack=true"

ui.childopts: "-Xmx768m -Djava.net.preferIPv4Stack=true"

supervisor.childopts: "-Djava.net.preferIPv4Stack=true"
worker.childopts: "-Xmx768m -Djava.net.preferIPv4Stack=true"
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@


# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
Expand All @@ -23,4 +21,5 @@ kafka_gid: 51521
kafka_logs_dir: /var/log/kafka
kafka_port: 9092
kafka_interface: eth1
include: kafka.yml

Original file line number Diff line number Diff line change
Expand Up @@ -64,8 +64,16 @@
shell: sed 's/[^0-9]//g' /etc/hostname
register: id

#- name: Updating the server.properties conf
# template: src=server.properties.j2 dest=/usr/local/etc/kafka/config/server.properties owner={{ kafka_user }} group={{ kafka_group }} mode=0644

- name: Updating the server.properties conf
template: src=server.properties.j2 dest=/usr/local/etc/kafka/config/server.properties owner={{ kafka_user }} group={{ kafka_group }} mode=0644
when: "'zookeeper' in {{ groups }}"

- name: Updating server.properties conf for the added kafka node
template: src=add.server.properties.j2 dest=/usr/local/etc/kafka/config/server.properties owner={{ kafka_user }} group={{ kafka_group }} mode=0644
when: "'zookeeper' not in {{ groups }}"

- name: Restarting Kafka
supervisorctl: name=kafka state=restarted
Expand Down
Loading

0 comments on commit 0218ada

Please sign in to comment.