/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Cassandra client interface
 */
@namespace("org.apache.cassandra.avro")
protocol Cassandra {
    record ColumnPath {
        string column_family;
        union { bytes, null } super_column;
        union { bytes, null } column;
    }

    record ColumnParent {
        string column_family;
        union { bytes, null } super_column;
    }

    record Column {
        bytes name;
        bytes value;
        long timestamp;
        union { int, null } ttl;
    }

    record SuperColumn {
        bytes name;
        array<Column> columns;
    }

    record ColumnOrSuperColumn {
        union { Column, null } column;
        union { SuperColumn, null } super_column;
    }

    record SliceRange {
        bytes start;
        bytes finish;
        boolean reversed;
        int count;
        union { array<bytes>, null } bitmasks;
    }

    record SlicePredicate {
        union { array<bytes>, null } column_names;
        union { SliceRange, null } slice_range;
    }

    record TokenRange {
        string start_token;
        string end_token;
        array<string> endpoints;
    }

    enum IndexOperator {
        EQ, GTE, GT, LTE, LT
    }

    record IndexExpression {
        bytes column_name;
        IndexOperator op;
        bytes value;
    }

    record IndexClause {
        array<IndexExpression> expressions;
        bytes start_key;
        int count;
    }

    /**
     * The semantics of start keys and tokens are slightly different.
     * Keys are start-inclusive; tokens are start-exclusive.  Token
     * ranges may also wrap -- that is, the end token may be less
     * than the start one.  Thus, a range from keyX to keyX is a
     * one-element range, but a range from tokenY to tokenY is the
     * full ring.  (See the illustrative sketch after the ColumnDef
     * record below.)
     */
    record KeyRange {
        union { bytes, null } start_key;
        union { bytes, null } end_key;
        union { string, null } start_token;
        union { string, null } end_token;
        int count;
    }

    record KeySlice {
        bytes key;
        array<ColumnOrSuperColumn> columns;
    }

    record Deletion {
        long timestamp;
        union { bytes, null } super_column;
        union { SlicePredicate, null } predicate;
    }

    record Mutation {
        union { ColumnOrSuperColumn, null } column_or_supercolumn;
        union { Deletion, null } deletion;
    }

    @aliases(["org.apache.cassandra.config.avro.IndexType"])
    enum IndexType {
        KEYS
    }

    /* describes a column in a column family. */
    @aliases(["org.apache.cassandra.config.avro.ColumnDef"])
    record ColumnDef {
        bytes name;
        string validation_class;
        union { IndexType, null } index_type;
        union { string, null } index_name;
    }
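    /*
     * Illustrative sketch (not part of the protocol): two hypothetical KeyRange
     * instances, written as informal field/value pairs rather than exact Avro
     * JSON encoding, contrasting the key and token semantics documented above.
     *
     *   { start_key: "keyX", end_key: "keyX", count: 100 }
     *       selects exactly one row, because key ranges are start-inclusive.
     *
     *   { start_token: "42", end_token: "42", count: 100 }
     *       selects the full ring, because token ranges are start-exclusive
     *       and may wrap.
     */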
    /**
     * Describes a column family.
     * NB: the id field is ignored during column family creation; the server
     * will choose an appropriate value.
     */
    @aliases(["org.apache.cassandra.config.avro.CfDef"])
    record CfDef {
        string keyspace;
        string name;
        union { string, null } column_type;
        union { string, null } comparator_type;
        union { string, null } subcomparator_type;
        union { string, null } comment;
        union { double, null } row_cache_size;
        union { double, null } key_cache_size;
        union { double, null } read_repair_chance;
        union { int, null } gc_grace_seconds;
        union { null, string } default_validation_class = null;
        union { null, int } min_compaction_threshold = null;
        union { null, int } max_compaction_threshold = null;
        union { int, null } row_cache_save_period_in_seconds = 0;
        union { int, null } key_cache_save_period_in_seconds = 3600;
        union { int, null } memtable_flush_after_mins = 60;
        union { null, int } memtable_throughput_in_mb = null;
        union { null, double } memtable_operations_in_millions = null;
        union { int, null } id;
        union { array<ColumnDef>, null } column_metadata;
    }

    /* describes a keyspace. */
    @aliases(["org.apache.cassandra.config.avro.KsDef"])
    record KsDef {
        string name;
        string strategy_class;
        union { map<string>, null } strategy_options;
        int replication_factor;
        array<CfDef> cf_defs;
    }

    record StreamingMutation {
        bytes key;
        Mutation mutation;
    }

    record MutationsMapEntry {
        bytes key;
        map<array<Mutation>> mutations;
    }

    record CoscsMapEntry {
        bytes key;
        array<ColumnOrSuperColumn> columns;
    }

    record KeyCountMapEntry {
        bytes key;
        int count;
    }

    record AuthenticationRequest {
        map<string> credentials;
    }

    enum ConsistencyLevel {
        ONE, QUORUM, LOCAL_QUORUM, EACH_QUORUM, ALL
    }

    error InvalidRequestException {
        union { string, null } why;
    }

    error NotFoundException {
        union { string, null } why;
    }

    error UnavailableException {
        union { string, null } why;
    }

    error TimedOutException {
        union { string, null } why;
    }

    error AuthenticationException {
        union { string, null } why;
    }

    error AuthorizationException {
        union { string, null } why;
    }

    void login(AuthenticationRequest auth_request)
        throws AuthenticationException, AuthorizationException;

    void set_keyspace(string keyspace) throws InvalidRequestException;

    ColumnOrSuperColumn get(bytes key,
                            ColumnPath column_path,
                            ConsistencyLevel consistency_level)
        throws InvalidRequestException, NotFoundException, UnavailableException,
               TimedOutException;

    /**
     * Get the group of columns contained by a column_parent (either a
     * ColumnFamily name or a ColumnFamily/SuperColumn name pair) specified
     * by the given SlicePredicate.  If no matching values are found, an empty
     * list is returned.  (See the illustrative sketch after multiget_count
     * below.)
     */
    array<ColumnOrSuperColumn> get_slice(bytes key,
                                         ColumnParent column_parent,
                                         SlicePredicate predicate,
                                         ConsistencyLevel consistency_level)
        throws InvalidRequestException, UnavailableException, TimedOutException;

    /**
     * Performs a get_slice for column_parent and predicate against the given
     * set of keys in parallel.
     */
    array<CoscsMapEntry> multiget_slice(array<bytes> keys,
                                        ColumnParent column_parent,
                                        SlicePredicate predicate,
                                        ConsistencyLevel consistency_level)
        throws InvalidRequestException, UnavailableException, TimedOutException;

    /**
     * Performs a get_count in parallel on the given list of keys.  The
     * return value maps keys to the count found.
     */
    array<KeyCountMapEntry> multiget_count(array<bytes> keys,
                                           ColumnParent column_parent,
                                           SlicePredicate predicate,
                                           ConsistencyLevel consistency_level)
        throws InvalidRequestException, UnavailableException, TimedOutException;
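    /*
     * Illustrative sketch (not part of the protocol): the shape of a
     * hypothetical get_slice call, written informally rather than as exact
     * Avro JSON encoding.  Names such as "Standard1" and "user1" are made up.
     * The predicate asks for up to 1000 columns of one row, in comparator
     * order:
     *
     *   get_slice(key           = "user1",
     *             column_parent = { column_family: "Standard1",
     *                               super_column: null },
     *             predicate     = { column_names: null,
     *                               slice_range: { start: "", finish: "",
     *                                              reversed: false,
     *                                              count: 1000,
     *                                              bitmasks: null } },
     *             consistency_level = QUORUM)
     *
     * The reply is an array of ColumnOrSuperColumn; an empty array means the
     * row had no matching columns.
     */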
    /**
     * Returns the subset of columns specified in SlicePredicate for
     * the rows matching the IndexClause.
     */
    array<KeySlice> get_indexed_slices(ColumnParent column_parent,
                                       IndexClause index_clause,
                                       SlicePredicate column_predicate,
                                       ConsistencyLevel consistency_level)
        throws InvalidRequestException, UnavailableException, TimedOutException;

    /**
     * Returns the number of columns matching a predicate for a particular
     * key, ColumnFamily, and optionally SuperColumn.
     */
    int get_count(bytes key,
                  ColumnParent column_parent,
                  SlicePredicate predicate,
                  ConsistencyLevel consistency_level)
        throws InvalidRequestException, UnavailableException, TimedOutException;

    void insert(bytes key,
                ColumnParent column_parent,
                Column column,
                ConsistencyLevel consistency_level)
        throws InvalidRequestException, UnavailableException, TimedOutException;

    void remove(bytes key,
                ColumnPath column_path,
                long timestamp,
                ConsistencyLevel consistency_level)
        throws InvalidRequestException, UnavailableException, TimedOutException;

    void batch_mutate(array<MutationsMapEntry> mutation_map,
                      ConsistencyLevel consistency_level)
        throws InvalidRequestException, UnavailableException, TimedOutException;

    /**
     * Truncate will mark an entire column family as deleted.  From the user's
     * perspective, a successful call to truncate results in complete data
     * deletion from the column family.  Internally, however, disk space is not
     * immediately released; as with all deletes in Cassandra, this one only
     * marks the data as deleted.  The operation succeeds only if all hosts in
     * the cluster are available, and throws an UnavailableException if some
     * hosts are down.
     */
    void truncate(string column_family)
        throws InvalidRequestException, UnavailableException;

    /**
     * Ask the cluster whether all nodes are using the same migration id.
     * Returns a map of version->hosts-on-that-version.  Hosts that did not
     * respond will be under the key DatabaseDescriptor.INITIAL_VERSION.
     * Agreement can be determined by checking whether the size of the map
     * is 1.
     */
    map<array<string>> check_schema_agreement() throws InvalidRequestException;

    string system_add_column_family(CfDef cf_def) throws InvalidRequestException;

    string system_add_keyspace(KsDef ks_def) throws InvalidRequestException;

    string system_drop_column_family(string column_family) throws InvalidRequestException;

    string system_drop_keyspace(string keyspace) throws InvalidRequestException;

    string system_update_column_family(CfDef cf_def) throws InvalidRequestException;

    string system_update_keyspace(KsDef ks_def) throws InvalidRequestException;

    array<KsDef> describe_keyspaces();

    KsDef describe_keyspace(string keyspace) throws NotFoundException;

    string describe_cluster_name();

    string describe_version();

    string describe_partitioner();

    /**
     * Experimental API for hadoop/parallel query support.
     * May change violently and without warning.
     *
     * Returns a list of token strings such that the first subrange is
     * (list[0], list[1]], the next is (list[1], list[2]], etc.
     */
    array<string> describe_splits(string cfName,
                                  string start_token,
                                  string end_token,
                                  int keys_per_split);

    /**
     * Get the token ring: a map of ranges to host addresses, represented as
     * an array of TokenRange, each holding a start-end token range and the
     * list of host addresses for that range.
     */
    array<TokenRange> describe_ring(string keyspace) throws InvalidRequestException;

    /**
     * Returns a subset of columns for a contiguous range of keys.
     * (See the illustrative paging sketch at the end of this file.)
     */
    array<KeySlice> get_range_slices(ColumnParent column_parent,
                                     SlicePredicate predicate,
                                     KeyRange range,
                                     ConsistencyLevel consistency_level)
        throws InvalidRequestException, UnavailableException, TimedOutException;
}
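/*
 * Illustrative sketch (not part of the protocol): paging through all rows of
 * a column family with get_range_slices, written as informal pseudocode.  The
 * column family name "Standard1" and the batch size are made-up examples.
 *
 *   range = { start_key: "", end_key: "", count: 100 }
 *   loop:
 *       slices = get_range_slices({ column_family: "Standard1" }, predicate,
 *                                 range, ONE)
 *       process(slices)
 *       if size(slices) < range.count: stop
 *       range.start_key = key of the last KeySlice returned
 *       // key ranges are start-inclusive, so the first row of the next
 *       // batch repeats the last row of this one and should be skipped
 */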