// scheduler.js

"use strict";

var http = require("http");
var EventEmitter = require("events").EventEmitter;
var util = require("util");
var uuid = require("uuid");

var helpers = require("./helpers");
var schedulerHandlers = require("./schedulerHandlers");
var mesos = require("./mesos")().getMesos();
var Builder = require("./builder");
var TaskHelper = require("./taskHelper");

var zookeeper = require("node-zookeeper-client");

/**
 * Represents a Mesos framework scheduler.
 * @constructor
 * @param {object} options - The option map object.
 */
function Scheduler (options) {

    if (!(this instanceof Scheduler)) {
        return new Scheduler(options);
    }

    // Inherit from EventEmitter
    EventEmitter.call(this);

    var self = this;

    self.options = {};
    self.options.frameworkName = (options.frameworkName ? options.frameworkName.replace(/ /g, "-") : "mesos-framework." + uuid.v4());
    self.options.user = options.user || "root";
    self.options.role = options.role || "*";
    self.options.restartStates = options.restartStates || ["TASK_FAILED", "TASK_LOST", "TASK_ERROR"]; // Task in TASK_FINISHED will NOT be restarted by default!
    self.options.frameworkFailoverTimeout = options.frameworkFailoverTimeout || 604800; // One week
    self.options.masterConnectionTimeout = options.masterConnectionTimeout * 1000 || 10000; // Ten seconds
    self.options.exponentialBackoffFactor = options.exponentialBackoffFactor || 1.5;
    self.options.exponentialBackoffMinimum = options.exponentialBackoffMinimum * 1000 || 1000; // One second
    self.options.exponentialBackoffMaximum = options.exponentialBackoffMaximum * 1000 || 15000; // 15 seconds
    self.options.killUnknownTasks = options.killUnknownTasks || false;
    self.options.serialNumberedTasks = (options.serialNumberedTasks !== false);

    // ZooKeeper
    self.options.useZk = options.useZk || false;
    self.options.zkUrl = options.zkUrl || "master.mesos:2181";
    self.options.zkPrefix = options.zkPrefix || "/dcos-service-";

    // Logging
    self.logger = helpers.getLogger((options.logging && options.logging.path ? options.logging.path : null), (options.logging && options.logging.fileName ? options.logging.fileName : null), (options.logging && options.logging.level ? options.logging.level : null));

    self.subscribeBackoffTime = 0;

    // Port allocation
    self.options.staticPorts = options.staticPorts || false;

    if (self.options.staticPorts){
        self.logger.info("Scheduler configured with fixed ports");
    }

    // Master discovery
    self.options.masterUrl = options.masterUrl || "127.0.0.1";
    self.options.port = parseInt(options.port) || 5050;

    // "Globals"
    self.frameworkId = null;
    self.mesosStreamId = null;
    self.lastHeartbeat = null;
    self.zkClient = null;

    // Tasks
    self.tasks = [];

    // Add tasks if there are any
    if (options.hasOwnProperty("tasks")) {
        self.tasks = helpers.sortTasksByPriority(options.tasks);
    }

    self.logger.debug(JSON.stringify(self.tasks));

    self.pendingTasks = [];
    self.launchedTasks = [];
    self.reconcileTasks = [];
    self.killTasks = [];

    // Add to pending tasks if not yet submitted
    self.tasks.forEach(function (task) {
        if (!task.isSubmitted) {
            self.pendingTasks.push(task);
        }
    });

    // Runtime info
    self.runtimeInfo = {};

    // Template for issuing Mesos Scheduler HTTP API requests
    self.requestTemplate = {};

    self.generateRequestTemplate = function () {
        self.requestTemplate = {
            host: self.options.masterUrl,
            port: self.options.port,
            path: "/api/v1/scheduler",
            method: "POST",
            headers: {
                'Content-Type': 'application/json'
            }
        };
    };

    // Fill the requestTemplate
    self.generateRequestTemplate();

    // Custom event handlers will be registered here
    self.customEventHandlers = {};

    // List of allowed event handler function names and their argument length
    var allowedEventHandlers = {
        "SUBSCRIBED": 1,
        "OFFERS": 1,
        "INVERSE_OFFERS": 1,
        "RESCIND":1,
        "RESCIND_INVERSE_OFFER": 1,
        "UPDATE": 1,
        "MESSAGE": 1,
        "FAILURE": 1,
        "ERROR": 1,
        "HEARTBEAT": 1
    };

    // Add custom event handlers if present
    if (options.handlers && Object.getOwnPropertyNames(options.handlers).length > 0) {
        Object.getOwnPropertyNames(options.handlers).forEach(function (handlerName) {
            var ucHandlerName = handlerName.toUpperCase();
            // Check if the name is allowed, the value is a function, and the function's argument count matches the one defined in allowedEventHandlers
            if (Object.getOwnPropertyNames(allowedEventHandlers).indexOf(ucHandlerName) > -1 && helpers.isFunction(options.handlers[handlerName]) && options.handlers[handlerName].length === allowedEventHandlers[ucHandlerName]) {
                self.customEventHandlers[ucHandlerName] = options.handlers[handlerName];
            }
        });
    }
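
    // For example (illustrative only), passing the following in the constructor options
    // would register a custom OFFERS handler (handler names are matched case-insensitively,
    // and the function's argument count must match the table above):
    //
    //   handlers: {
    //       "offers": function (offers) {
    //           // inspect, accept or decline the received offers here
    //       }
    //   }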

    // Fill runtimeInfo from given Tasks
    if (options.tasks && Object.getOwnPropertyNames(options.tasks).length > 0) {
        var tempPriority = 1;
        Object.getOwnPropertyNames(options.tasks).forEach(function (task) {
            // Populate runtimeInfo for each task
            self.runtimeInfo[task] = {
                "desiredInstances": options.tasks[task].instances || 1,
                "requestedInstances": 0,
                "runningInstances": {},
                "priority": options.tasks[task].priority || tempPriority
            };
            // Increase priority
            tempPriority++;
        });
    }

    if (options.hasOwnProperty("tasks")) {
        self.logger.debug(JSON.stringify(helpers.sortTasksByPriority(options.tasks)));
    }

    // Store the long-running request
    self.httpRequest = {};

    // Handling of ZooKeeper-related stuff
    if (self.options.useZk) {

        self.logger.debug("Using ZooKeeper for persistency. Connection is " + self.options.zkUrl);

        // Set default path for the service
        self.zkServicePath = self.options.zkPrefix + self.options.frameworkName;

        // Create ZK client (taking it from options is only meant for unit tests!)
        self.zkClient = options.zkClient || zookeeper.createClient(self.options.zkUrl);

        // Instantiate TaskHelper (taking it from options is only meant for unit tests!)
        self.taskHelper = options.taskHelper || new TaskHelper(self);

        // For unit test
        if (options.taskHelper) {
            self.taskHelper.scheduler = self;
        }

        // Set path for the framework id
        var zkPath = self.zkServicePath + "/framework-id";

        self.zkClient.on("error", function (error) {
            self.logger.error(error);
        });

        // Once connected, check if path exists
        self.zkClient.once("connected", function () {

            self.logger.debug("Connected to ZooKeeper on " + self.options.zkUrl);

            // Check if path with framework id exists
            self.zkClient.getData(zkPath, null, function (error, data, stat) {

                self.logger.debug("error before if:" + JSON.stringify(error));
                if (error) {

                    self.logger.debug("error:" + JSON.stringify(error));
                    // Check if node doesn't exist yet
                    if (error.getCode() === zookeeper.Exception.NO_NODE) {

                        self.logger.debug("Node " + zkPath + " doesn't exist yet. Will be created on framework launch");

                        // Add an event handler for the "subscribed" event, to store the framework ID in ZooKeeper
                        self.once("subscribed", function (obj) {

                            self.logger.debug("Got subscribed event");

                            self.logger.debug("now creating path " + zkPath);

                            // Separating path creation from data save due to various client bugs.
                            self.zkClient.mkdirp(zkPath, function (error, stat) {

                                if (error) {
                                    self.logger.error("Got error when creating a ZK node for the framework ID: " + error.stack);
                                    self.options.useZk = false;
                                    self.zkClient.close();
                                } else {
                                    self.zkClient.setData(zkPath, new Buffer(self.frameworkId), function (error, stat) {
                                        if (error) {
                                            self.logger.error("Got error when saving the framework ID on ZK: " + error.stack);
                                            self.options.useZk = false;
                                            self.zkClient.close();
                                        } else {
                                            self.logger.debug("Successfully set framework ID")
                                        }
                                    });
                                }

                            });

                        });

                        // We're ready to subscribe
                        // Timeout to let init finish
                        setTimeout(function() {
                            self.emit("ready");
                        }, 100);

                    } else {
                        // Other error
                        self.logger.error(error.stack);
                    }

                } else if (data) {
                    self.logger.debug("Got framework ID from ZooKeeper:" + data.toString());
                    // Set framework id to existing one from ZooKeeper
                    self.frameworkId = data.toString();
                    // Load tasks from ZooKeeper
                    self.taskHelper.loadTasks();
                    // "ready" event is emitted after successfully loading the tasks!
                }

            });

        });

        // Connect to ZooKeeper
        self.zkClient.connect();

    } else {
        // Timeout to let init finish
        setTimeout(function() {
            self.emit("ready");
        }, 100);
    }

}

// Inherit from EventEmitter
util.inherits(Scheduler, EventEmitter);
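
// Example usage (illustrative sketch; a real task definition would typically carry more
// fields than shown here, but only "instances" and "priority" are interpreted in this file):
//
//   var scheduler = new Scheduler({
//       frameworkName: "my-framework",
//       masterUrl: "leader.mesos",
//       port: 5050,
//       tasks: {
//           webserver: { instances: 2, priority: 1 }
//       },
//       handlers: {
//           "UPDATE": function (update) { this.logger.info(JSON.stringify(update)); }
//       }
//   });
//
//   scheduler.on("ready", function () {
//       scheduler.subscribe();
//   });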

/**
 * Subscribes the scheduler with the master to receive events. A scheduler must send other calls only after it has received the SUBSCRIBED event.
 */
Scheduler.prototype.subscribe = function () {

    var self = this;

    /**
     * The handler function for the incoming Mesos master events for this framework.
     * @param {object} eventData - The data object for an incoming event. Contains the event details (type etc.).
     */
    function handleEvent (eventData) {

        try {

            var event = JSON.parse(eventData);

            // Determine event handler, use custom one if it exists
            if (self.customEventHandlers[event.type]) {
                // Call custom handler
                self.customEventHandlers[event.type].call(self, event[event.type.toLocaleLowerCase()]);
            } else {
                // Call default handler
                schedulerHandlers[event.type].call(self, event[event.type.toLocaleLowerCase()]);
            }

            // Emit events per type
            if (event.type === "SUBSCRIBED") {
                // Set frameworkId
                self.frameworkId = event[event.type.toLocaleLowerCase()].framework_id.value;
                // TODO: Check!
                self.sync();
                self.subscribeBackoffTime = 0;
                // Emit with usable object details
                self.emit("subscribed", { frameworkId: event[event.type.toLocaleLowerCase()].framework_id.value, mesosStreamId: self.mesosStreamId });
            } else if (event.type === "HEARTBEAT") {
                // Set lastHeartbeat timestamp
                self.lastHeartbeat = new Date().getTime();
                // Emit with current timestamp
                self.emit(event.type.toLocaleLowerCase(), self.lastHeartbeat);
            } else if (event.type === "MESSAGE") {
                // Emit with usable message object (data decoded from Base64 to ASCII)
                self.emit("message", { agentId: event[event.type.toLocaleLowerCase()].agent_id, executorId: event[event.type.toLocaleLowerCase()].executor_id, data: new Buffer(event[event.type.toLocaleLowerCase()].data, "base64").toString("ascii") });
            } else if (event.type === "ERROR") {
                // Emit an actual error object to identify errors from mesos master
                self.emit("error", new Error("Error received from Mesos master: " + event[event.type.toLocaleLowerCase()].message));
            } else {
                // Emit original objects for all other types
                self.emit(event.type.toLocaleLowerCase(), event[event.type.toLocaleLowerCase()]);
            }

        } catch (error) {
            self.emit("error", { message: "Couldn't parse as JSON: " + eventData, stack: (error.stack || "") });
        }

    }

    function handleRedirect(location) {

        // Redirection to another Master received
        self.logger.info("SUBSCRIBE: Redirect Location: " + location);

        // Derive the leader info
        var leaderInfo = location.replace(/\/\//g, "").split(":");

        // Check for scheme and move window accordingly
        var schemeIndex = leaderInfo.length > 2 ? 0 : -1;

        // Set new leading master info
        self.options.masterUrl = leaderInfo[schemeIndex + 1];

        // If the port part contains slashes (i.e. it is followed by a URL path), fix it by extracting just the port
        if (leaderInfo[schemeIndex + 2].indexOf("\/") > -1) {
            var temp = leaderInfo[schemeIndex + 2].split("/");
            self.options.port = temp[0];
        } else {
            self.options.port = leaderInfo[schemeIndex + 2];
        }

        self.logger.info("SUBSCRIBE: Leader info: " + self.options.masterUrl + ":" + self.options.port);

        // Fill the requestTemplate
        self.generateRequestTemplate();

    }

    var handledTimeout = false;

    self.httpRequest = http.request(self.requestTemplate, function (res) {

        self.logger.info("SUBSCRIBE: Response status: " + res.statusCode);

        if (res.statusCode === 307 && res.headers["location"]) {

            // Handle redirect information
            handleRedirect(res.headers["location"]);

            // Try to re-register
            self.subscribe();

        } else if (res.statusCode === 200) {
            if (!res.headers["mesos-stream-id"]) {
                self.emit("error", { message: "Mesos-Stream-Id header field was not found!"})
            } else {

                // Set mesosStreamId
                self.mesosStreamId = res.headers["mesos-stream-id"];

                // Set encoding to UTF8
                res.setEncoding('utf8');

                // Emit sent_subscribe event
                self.emit("sent_subscribe", { mesosStreamId: self.mesosStreamId });

                // Local cache for chunked JSON messages
                var cache = "";
                var expectedLength = 0;
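
                // The scheduler API streams events in RecordIO-style framing, i.e.
                // "<length>\n<JSON event>": the length prefix tells how much event data
                // to expect, so partial chunks are cached below until a complete event
                // has been received.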

                // Watch for data/chunks
                res.on('data', function (chunk) {

                    if (chunk instanceof Buffer) {
                        chunk = chunk.toString();
                    }

                    if (chunk.indexOf("\n") > -1) {
                        var temp = chunk.split("\n");
                        if (temp.length === 2) {
                            expectedLength = parseInt(temp[0]);
                            if (temp[1].length < expectedLength) {
                                // Add to cache
                                cache += temp[1];
                            } else {
                                // Empty cache
                                cache = "";
                                expectedLength = 0;
                                // Handle event
                                handleEvent(temp[1]);
                            }
                        } else {
                            self.emit("error", { message: "Other linebreak count found than expected! Actual count: " + temp.length });
                        }
                    } else {
                        if (cache.length > 0 && (cache.length + chunk.length) >= expectedLength) {
                            // Concatenate cached partial data with this chunk and handle only when done
                            var eventData = cache + chunk;
                            // Handle event
                            handleEvent(eventData);
                            // Empty cache
                            cache = "";
                            expectedLength = 0;
                        } else if (cache.length > 0 && (cache.length + chunk.length) < expectedLength) {
                            // Concatenate cached data with current chunk, for cases in which the stream buffer is smaller than the data.
                            cache += chunk;
                        }
                    }
                });

                res.on('end', function () {
                    self.emit("error", { message: "Long-running connection was closed!" });
                    self.logger.info("Long-running connection was closed!");
                    if (!handledTimeout) {
                        self.backOff();
                        // Re-subscribe
                        // We need to remove the stream id from the headers before re-subscribing!
                        self.mesosStreamId = undefined;
                        delete self.requestTemplate.headers["mesos-stream-id"];
                        delete self.requestTemplate.headers["Mesos-Stream-Id"];
                        self.subscribe();
                    }
                });

                res.on('finish', function () {
                    self.logger.info("FINISH!");
                });

                res.on('close', function () {
                    self.logger.info("CLOSE!");
                });

            }

        } else {
            res.on("data",function (chunk) {
                if (chunk.length > 0) {
                    self.emit("error", {message: "Error registering with mesos: " + chunk.toString() + " , code: " + res.statusCode.toString()});
                } else {
                    self.emit("error", {message: "Error registering with mesos - empty response, code: " + res.statusCode.toString()});
                }
            });
        }

    });

    self.httpRequest.on('error', function (e) {
        self.emit("error", { message: "There was a problem with the request: " + (e.message ? e.message : JSON.stringify(e)) });
    });

    // Register a timeout for triggering of re-registrations of the scheduler
    self.httpRequest.on('socket', function (socket) {
        var httpRequest = self.httpRequest;
        socket.setTimeout(self.options.masterConnectionTimeout);
        socket.on('timeout', function() {
            self.logger.error("Received a timeout on the long-running Master connection! Will try to re-register the framework scheduler!");
            handledTimeout = true;
            socket.destroy();
            // Make sure the timeout is not re-emitted.
            socket.setTimeout(0);
            if (httpRequest !== self.httpRequest) {
                self.logger.info("Already reconnected, not attempting again.");
                return;
            }

            // Backing off before resubscribe
            self.backOff();

            // If we're using Mesos DNS, we can directly re-register, because Mesos DNS will discover the current leader automatically
            if (self.options.masterUrl === "leader.mesos") {
                // We need to remove the stream id from the headers before re-subscribing!
                self.mesosStreamId = undefined;
                delete self.requestTemplate.headers["mesos-stream-id"];
                delete self.requestTemplate.headers["Mesos-Stream-Id"];
                self.logger.info("Using Mesos DNS, will re-register to 'leader.mesos'!");
                // Subscribe
                self.subscribe();
            } else {
                self.logger.info("Not using Mesos DNS, try to get new leader through redirection!");
                // If not, it's more difficult. When an IP address is passed for the Master and the Master is unavailable,
                // we cannot use Master detection via Location headers, as outlined at http://mesos.apache.org/documentation/latest/scheduler-http-api/ (chapter "Master detection"),
                // because the request will not succeed. So, we first try the redirect method (in case of a leader change); if that is not possible, we have to shut down our framework,
                // unless there is a better way in the future.
                var redirectRequest = http.request(self.requestTemplate, function (res) {
                    // Check if we received a redirect
                    if (res.statusCode === 307 && res.headers["location"]) {
                        self.logger.info("Received redirection information. Will attempt to re-register the framework scheduler!");
                        // Handle redirect information
                        handleRedirect(res.headers["location"]);
                        // Subscribe
                        self.subscribe();
                    }
                });
                // Set timeout for redirect request. When it's triggered, we know that the last leading master is down and that we cannot get the current leader information from it.
                // So, we have to shutdown the framework scheduler, because we're out of options.
                redirectRequest.on('socket', function (socket) {
                    socket.setTimeout(self.options.masterConnectionTimeout);
                    socket.on('timeout', function() {
                        self.logger.error("Couldn't receive a response for the redirect request from the last leading master!");
                        self.logger.error("There's no way to recover, the framework scheduler will halt now!");
                        process.exit(1);
                    });
                });
            }

        });
    });

    // Set the Subscribe object
    var Subscribe = new Builder("mesos.scheduler.Call.Subscribe")
        .setFrameworkInfo(new Builder("mesos.FrameworkInfo")
            .setUser(self.options.user)
            .setRole(self.options.role)
            .setName(self.options.frameworkName)
            .setId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
            .setFailoverTimeout(self.options.frameworkFailoverTimeout)
            .setHostname(process.env.HOST ? process.env.HOST : null)
            .setWebuiUrl(process.env.HOST && process.env.PORT0 ? "http://" + process.env.HOST + ":" + process.env.PORT0 : null)
        );

    self.logger.info("SUBSCRIBE: " + JSON.stringify(Subscribe));

    // Set the Call object
    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.SUBSCRIBE)
        .setSubscribe(Subscribe)
    );

    setTimeout(function () {
        // Write data to request body
        self.httpRequest.write(JSON.stringify(Call));

        // End request
        self.httpRequest.end();
    }, self.subscribeBackoffTime);

};

/**
 * Accept incoming offers to actually start the framework scheduler.
 * @param {array} offersIds - The array of {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L49|OfferID}s which should be accepted.
 * @param {array} operations - The array of {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L1903|Operation} objects.
 * @param {object} filters - The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L2488|Filters} object.
 */
Scheduler.prototype.accept = function (offersIds, operations, filters) {

    var self = this;

    // Set the Accept object
    var Accept = new Builder("mesos.scheduler.Call.Accept")
            .setOfferIds(offersIds)
            .setOperations(operations)
            .setFilters(filters);

    self.logger.info("ACCEPT: " + JSON.stringify(Accept));

    // Set the Call object
    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(new mesos.FrameworkID(self.frameworkId))
        .setType(mesos.scheduler.Call.Type.ACCEPT)
        .setAccept(Accept)
    );

    self.logger.debug("Assembled ACCEPT call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_accept");
        }
    });

};
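
// Example (illustrative sketch): accepting offers from a custom "offers" event handler.
// Building "launchOperation" (a mesos.Offer.Operation of type LAUNCH carrying the TaskInfo)
// is elided here, as it depends on the task definition:
//
//   scheduler.on("offers", function (offers) {
//       var offerIds = offers.offers.map(function (offer) { return offer.id; });
//       scheduler.accept(offerIds, [launchOperation], null);
//   });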

/**
 * Decline incoming offers because they are not needed by the framework scheduler currently.
 * @param {array} offersIds - The array of {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L49|OfferID}s which should be declined.
 * @param {object} filters - The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L2488|Filters} object.
 */
Scheduler.prototype.decline = function (offersIds, filters) {

    var self = this;

    // Set the Decline object
    var Decline = new Builder("mesos.scheduler.Call.Decline")
        .setOfferIds(offersIds)
        .setFilters(filters);

    // Set the Call object
    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(new mesos.FrameworkID(self.frameworkId))
        .setType(mesos.scheduler.Call.Type.DECLINE)
        .setDecline(Decline)
    );

    self.logger.debug("Assembled DECLINE call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_decline");
        }
    });

};
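
// Example (illustrative sketch): declining all received offers when the framework
// currently has nothing to launch:
//
//   scheduler.on("offers", function (offers) {
//       var offerIds = offers.offers.map(function (offer) { return offer.id; });
//       scheduler.decline(offerIds, null);
//   });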

/**
 * Tear down the framework scheduler. When Mesos receives this request it will shut down all executors (and consequently kill tasks).
 * It then removes the framework and closes all open connections from this scheduler to the Master.
 */
Scheduler.prototype.teardown = function () {

    var self = this;

    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.TEARDOWN)
    );

    self.logger.debug("Assembled TEARDOWN call: " + JSON.stringify(Call));
    
    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_teardown");
        }
    });

};

/**
 * Remove any/all filters that the framework has previously set via ACCEPT or DECLINE calls.
 */
Scheduler.prototype.revive = function () {

    var self = this;

    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.REVIVE)
    );

    self.logger.debug("Assembled REVIVE call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_revive");
        }
    });

};

/**
 *  Kill a specific task. If the scheduler has a custom executor, the kill is forwarded to the executor; it is up to the executor to kill the task and send a TASK_KILLED (or TASK_FAILED) update.
 *  Mesos releases the resources for a task once it receives a terminal update for the task. If the task is unknown to the master, a TASK_LOST will be generated.
 * @param {string} taskId The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L71|TaskID} to kill.
 * @param {string} agentId The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L59|AgentID} the task is running on.
 */
Scheduler.prototype.kill = function (taskId, agentId) {

    var self = this;

    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.KILL)
        .setKill(new Builder("mesos.scheduler.Call.Kill")
            .setTaskId(new Builder("mesos.TaskID").setValue(taskId))
            .setAgentId(new Builder("mesos.AgentID").setValue(agentId))
        )
    );

    self.logger.debug("Assembled KILL call: " + JSON.stringify(Call));

    self.logger.debug("Killing task ID: " + taskId);

    if (self.options.useZk)
        self.taskHelper.deleteTask(taskId);

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_kill");
        }
    });

};
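
// Example (illustrative sketch): killing a launched task using the IDs tracked in this
// scheduler's launchedTasks list:
//
//   var task = scheduler.launchedTasks[0];
//   scheduler.kill(task.taskId, task.runtimeInfo.agentId);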

/**
 * Shut down a specific custom executor (NOTE: This is a new call that was not present in the old API). When an executor gets a shutdown event, it is expected to kill all its tasks (and send TASK_KILLED updates) and terminate.
 * If an executor doesn't terminate within a certain timeout (configurable via the "--executor_shutdown_grace_period" agent flag), the agent will forcefully destroy the container (executor and its tasks) and transition its active tasks to TASK_LOST.
 * @param {string} agentId The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L59|AgentID} the task is running on.
 * @param {string} executorId The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L81|ExecutorID} which runs the task.
 */
Scheduler.prototype.shutdown = function (agentId, executorId) {

    var self = this;

    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.SHUTDOWN)
        .setShutdown(new Builder("mesos.scheduler.Call.Shutdown")
            .setExecutorId(new Builder("mesos.ExecutorID").setValue(executorId))
            .setAgentId(new Builder("mesos.AgentID").setValue(agentId))
        )
    );

    self.logger.debug("Assembled SHUTDOWN call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_shutdown");
        }
    });

};

/**
 * Acknowledge a status update.
 * @param {object} update The status update to acknowledge.
 */
Scheduler.prototype.acknowledge = function (update) {

    var self = this;

    if (!update.status.uuid) {
        self.logger.debug("An update without UUID received, acknowledge skipped. Update: " + JSON.stringify(update));
        return;
    }

    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.ACKNOWLEDGE)
        .setAcknowledge(new Builder("mesos.scheduler.Call.Acknowledge")
            .setTaskId(new Builder("mesos.TaskID").setValue(update.status.task_id))
            .setAgentId(new Builder("mesos.AgentID").setValue(update.status.agent_id))
            .setUuid(update.status.uuid)
        )
    );

    self.logger.debug("Assembled ACKNOWLEDGE call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_acknowledge");
        }
    });

};

/**
 * Query the status of non-terminal tasks. This causes the master to send back UPDATE events for each task in the list. Tasks that are no longer known to Mesos will result in TASK_LOST updates.
 * If the list of tasks is empty, the master will send UPDATE events for all currently known tasks of the framework.
 * @param {string} taskId The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L71|TaskID} to reconcile.
 * @param {string} agentId The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L59|AgentID} the task is running on.
 */
Scheduler.prototype.reconcile = function (taskId, agentId) {

    var self = this;

    var Call = null;

    if (taskId && agentId) {
        Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
            .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
            .setType(mesos.scheduler.Call.Type.RECONCILE)
            .setReconcile(new Builder("mesos.scheduler.Call.Reconcile")
                .setTasks(new Builder("mesos.scheduler.Call.Reconcile.Task")
                    .setTaskId(new Builder("mesos.TaskID").setValue(taskId))
                    .setAgentId(new Builder("mesos.AgentID").setValue(agentId)
                    )
                )
            )
        );
        self.logger.debug("Reconciling task ID: " + taskId);
    } else {
        Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
            .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
            .setType(mesos.scheduler.Call.Type.RECONCILE)
            .setReconcile(new Builder("mesos.scheduler.Call.Reconcile")
                .setTasks([])
            )
        );
        self.logger.debug("Reconciling all tasks ");
    }

    self.logger.debug("Assembled RECONCILE call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_reconcile");
        }
    });

};
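
// Example (illustrative sketch): explicit reconciliation for a single launched task,
// and implicit reconciliation for all tasks of the framework:
//
//   var task = scheduler.launchedTasks[0];
//   scheduler.reconcile(task.taskId, task.runtimeInfo.agentId); // explicit
//   scheduler.reconcile();                                      // implicit (all tasks)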

/**
 * Send arbitrary data to the executor. Note that Mesos neither interprets this data nor makes any guarantees about the delivery of this message to the executor.
 * @param {string} agentId The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L59|AgentID} the task is running on.
 * @param {string} executorId The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L81|ExecutorID} which runs the task.
 * @param {string} data The string whose raw bytes will be encoded in Base64.
 */
Scheduler.prototype.message = function (agentId, executorId, data) {

    var self = this;

    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.MESSAGE)
        .setMessage(new Builder("mesos.scheduler.Call.Message")
            .setExecutorId(new Builder("mesos.ExecutorID").setValue(executorId))
            .setAgentId(new Builder("mesos.AgentID").setValue(agentId))
            .setData(new Buffer(data).toString("base64"))
        )
    );

    self.logger.debug("Assembled MESSAGE call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_message");
        }
    });

};
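
// Example (illustrative sketch): sending a string payload to a custom executor. The
// agentId and executorId values here are placeholders; the "message" event emitted by
// this scheduler carries both fields, and the data is Base64-encoded before sending:
//
//   scheduler.message(agentId, executorId, "reload-config");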

/**
 * Request resources from the master/allocator. The built-in hierarchical allocator simply ignores this request but other allocators (modules) can interpret this in a customizable fashion.
 * @param {array} requests The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L1858|Request} objects which should be sent to the server.
 */
Scheduler.prototype.request = function (requests) {

    var self = this;

    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.REQUEST)
        .setRequest(new Builder("mesos.scheduler.Call.Request")
            .setRequests(requests)
        )
    );

    self.logger.debug("Assembled REQUEST call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_request");
        }
    });

};

/**
 * Suppress offers for the specified roles. If `roles` is empty, the `SUPPRESS` call will suppress offers for all of the roles the framework is currently subscribed to.
 * @param {array} roles The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L3390|Role} objects which should be sent to the server.
 */
Scheduler.prototype.suppress = function (roles) {

    var self = this;

    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.SUPPRESS)
        .setSuppress(new Builder("mesos.scheduler.Call.Suppress")
            .setRoles(roles)
        )
    );

    self.logger.debug("Assembled SUPPRESS call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_suppress");
        }
    });

};
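
// Example (illustrative sketch): suppressing offers for all currently subscribed roles
// while no work is pending, and reviving them again later:
//
//   scheduler.suppress([]);
//   // later, when new tasks need to be scheduled:
//   scheduler.revive();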

/**
 * Accepts an inverse offer. Inverse offers should only be accepted if the resources in the offer can be safely evacuated before the provided unavailability.
 * @param {array} inverseOffersIds The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L49|OfferID} array which should be sent to the server.
 * @param {object} filters The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L2488|Filters} object which should be sent to the server.
 */
Scheduler.prototype.acceptInverseOffers = function (inverseOffersIds, filters) {

    var self = this;

    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.ACCEPT_INVERSE_OFFERS)
        .setAcceptInverseOffers(new Builder("mesos.scheduler.Call.AcceptInverseOffers")
            .setInverseOfferIds(inverseOffersIds)
            .setFilters(filters)
        )
    );

    self.logger.debug("Assembled ACCEPT_INVERSE_OFFERS call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_accept_inverse_offers");
        }
    });

};

/**
 * Declines an inverse offer. Inverse offers should be declined if the resources in the offer might not be safely evacuated before the provided unavailability.
 * @param {array} inverseOffersIds The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L49|OfferID} array which should be sent to the server.
 * @param {object} filters The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L2488|Filters} object which should be sent to the server.
 */
Scheduler.prototype.declineInverseOffers = function (inverseOffersIds, filters) {

    var self = this;

    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.DECLINE_INVERSE_OFFERS)
        .setDeclineInverseOffers(new Builder("mesos.scheduler.Call.DeclineInverseOffers")
            .setInverseOfferIds(inverseOffersIds)
            .setFilters(filters)
        )
    );

    self.logger.debug("Assembled DECLINE_INVERSE_OFFERS call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_decline_inverse_offers");
        }
    });

};

/**
 * Acknowledges the receipt of an operation status update. Schedulers are responsible for explicitly acknowledging the receipt of updates which have the 'UpdateOperationStatus.status().uuid()' field set. Such status updates are retried by the agent or resource provider until they are acknowledged by the scheduler.
 * @param {string} agentId The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L59|AgentID} the task is running on.
 * @param {string} resourceProviderId The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L102|ResourceProviderId}
 * @param {string} operationId The {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/mesos.proto#L111|OperationID}
 * @param {string} uuid The uuid of the Operation
 */
Scheduler.prototype.acknowledgeOperationStatus = function (agentId, resourceProviderId, operationId, uuid) {

    var self = this;

    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.ACKNOWLEDGE_OPERATION_STATUS)
        .setAcknowledgeOperationStatus(new Builder("mesos.scheduler.Call.AcknowledgeOperationStatus")
            .setAgentId(new Builder("mesos.AgentID").setValue(agentId))
            .setResourceProviderId(new Builder("mesos.ResourceProviderID").setValue(resourceProviderId))
            .setOperationId(new Builder("mesos.OperationID").setValue(operationId))
            .setUuid(uuid)
        )
    );

    self.logger.debug("Assembled ACKNOWLEDGE_OPERATION_STATUS call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_acknowledge_operation_status");
        }
    });

};

/**
 * Allows the scheduler to query the status of operations. This causes the master to send back the latest status for each operation in 'operations', if possible. If 'operations' is empty, then the master will send the latest status for each operation currently known.
 * @param {array} operations An array of {@link https://github.com/apache/mesos/blob/1.5.x/include/mesos/v1/scheduler/scheduler.proto#L420|Operation} objects to query.
 */
Scheduler.prototype.reconcileOperations = function (operations) {

    var self = this;

    var Call = helpers.fixEnums(new Builder("mesos.scheduler.Call")
        .setFrameworkId(self.frameworkId ? new mesos.FrameworkID(self.frameworkId) : null)
        .setType(mesos.scheduler.Call.Type.RECONCILE_OPERATIONS)
        .setReconcileOperations(new Builder("mesos.scheduler.Call.ReconcileOperations")
            .setOperations(operations)
        )
    );

    self.logger.debug("Assembled RECONCILE_OPERATIONS call: " + JSON.stringify(Call));

    helpers.doRequest.call(self, Call, function (error, response) {
        if (error) {
            self.emit("error", error.message);
        } else {
            self.emit("sent_reconcile_operations");
        }
    });

};

/**
 * Get the running tasks of this framework scheduler.
 * @returns {Array} The running task array.
 */
Scheduler.prototype.getRunningTasks = function () {

    var self = this;
    var runningTasks = [];

    Object.getOwnPropertyNames(self.runtimeInfo).forEach(function (taskType) {
        Object.getOwnPropertyNames(self.runtimeInfo[taskType].runningInstances).forEach(function (task) {
            runningTasks.push(task);
        });
    });

    return runningTasks;

};

/**
 * Synchronize the tasks of this scheduler.
 */
Scheduler.prototype.sync = function () {
    var self = this;
    for (var i = 0; i < self.reconcileTasks.length; i++) {
        if (self.reconcileTasks[i].runtimeInfo.agentId) {
            self.reconcile(self.reconcileTasks[i].taskId, self.reconcileTasks[i].runtimeInfo.agentId);
        } else {
            if (self.options.useZk)
                self.taskHelper.deleteTask(self.reconcileTasks[i].taskId);
        }
    }
    self.reconcileTasks = [];
    self.launchedTasks.forEach(function (task) {
        if (task.runtimeInfo.agentId) {
            self.reconcile(task.taskId, task.runtimeInfo.agentId);
        }
    });
    for (var i = 0; i < self.killTasks.length; i++) {
        self.kill(self.killTasks[i].taskId, self.killTasks[i].runtimeInfo.agentId);
    }
    self.killTasks = [];
    if (self.options.useZk) {
        self.reconcile();
    }
};

/**
 * Calculate the backOff time (for reconnection trials)
 */
Scheduler.prototype.backOff = function () {
    var self = this;
    self.subscribeBackoffTime *= self.options.exponentialBackoffFactor;
    self.subscribeBackoffTime = Math.round(self.subscribeBackoffTime);
    if (self.subscribeBackoffTime === 0) {
        self.subscribeBackoffTime = self.options.exponentialBackoffMinimum;
    }
    if (self.subscribeBackoffTime > self.options.exponentialBackoffMaximum) {
        self.subscribeBackoffTime = self.options.exponentialBackoffMaximum;
    }
    self.logger.debug("Backoff time: " + self.subscribeBackoffTime);
};
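
// With the default options (factor 1.5, minimum 1000 ms, maximum 15000 ms), repeated
// backOff() calls produce subscribe delays of 1000, 1500, 2250, 3375, ... ms, capped
// at 15000 ms. A successful SUBSCRIBED event resets the delay to 0.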

module.exports = Scheduler;