/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * These .proto interfaces are private and stable.
 * Please see http://wiki.apache.org/hadoop/Compatibility
 * for what changes are allowed for a *stable* .proto interface.
 */

// This file contains the protocol buffers used by the journal protocol,
// i.e. the protocol by which edit log records are streamed from the
// NameNode to a remote journal receiver such as the BackupNode.

option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "JournalProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
package hadoop.hdfs;

import "hdfs.proto";

/**
 * Journal information used by the journal receiver to identify a journal.
 */
message JournalInfoProto {
  required string clusterID = 1;     // ID of the cluster
  optional uint32 layoutVersion = 2; // Layout version
  optional uint32 namespaceID = 3;   // Namespace ID
}

/**
 * journalInfo - the information about the journal
 * firstTxnId - the first txid in the journal records
 * numTxns - number of transactions in the edit log
 * records - bytes containing serialized journal records
 * epoch - a change to this represents a change of journal writer
 */
message JournalRequestProto {
  required JournalInfoProto journalInfo = 1;
  required uint64 firstTxnId = 2;
  required uint32 numTxns = 3;
  required bytes records = 4;
  required uint64 epoch = 5;
}

/**
 * void response
 */
message JournalResponseProto {
}

/**
 * journalInfo - the information about the journal
 * txid - the first txid in the new log segment
 * epoch - a change to this represents a change of journal writer
 */
message StartLogSegmentRequestProto {
  required JournalInfoProto journalInfo = 1; // Info about the journal
  required uint64 txid = 2;                  // Transaction ID
  required uint64 epoch = 3;                 // Epoch - change indicates change in writer
}

/**
 * void response
 */
message StartLogSegmentResponseProto {
}

/**
 * journalInfo - the information about the journal
 * epoch - the epoch of the caller; a change indicates a change in writer
 * fencerInfo - information about the fencer, for debugging
 */
message FenceRequestProto {
  required JournalInfoProto journalInfo = 1; // Info about the journal
  required uint64 epoch = 2;                 // Epoch - change indicates change in writer
  optional string fencerInfo = 3;            // Info about fencer for debugging
}

/**
 * previousEpoch - the previous epoch, if any, or zero
 * lastTransactionId - the last valid transaction id in the journal
 * inSync - whether all journal segments are available and in sync
 */
message FenceResponseProto {
  optional uint64 previousEpoch = 1;
  optional uint64 lastTransactionId = 2;
  optional bool inSync = 3;
}

/**
 * Protocol used to journal edits to a remote node. Currently,
 * this is used to publish edits from the NameNode to a BackupNode.
 *
 * See the request and response messages for details of each rpc call.
 */
service JournalProtocolService {
  /**
   * Request sent by the active NameNode to the backup node via
   * EditLogBackupOutputStream to stream edit log records.
   */
  rpc journal(JournalRequestProto) returns (JournalResponseProto);

  /**
   * Request sent by the active NameNode to the backup node to notify
   * that the NameNode has rolled its edit logs and is now writing a
   * new log segment.
   */
  rpc startLogSegment(StartLogSegmentRequestProto)
      returns (StartLogSegmentResponseProto);

  /**
   * Request to fence a journal receiver.
   */
  rpc fence(FenceRequestProto)
      returns (FenceResponseProto);
}
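
// A sketch of the expected call sequence, inferred from the message and rpc
// comments above rather than from an authoritative specification: a writer
// identified by an epoch first calls startLogSegment to announce a new segment
// starting at a given txid, then calls journal repeatedly to stream batches of
// serialized edit records for that segment. A node taking over as writer calls
// fence with a higher epoch; the receiver replies with its previous epoch, its
// last valid transaction id, and whether its segments are in sync, and is then
// expected to reject requests that carry the older epoch.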