public final class DruidStorageHandlerUtils extends Object
Modifier and Type | Class and Description |
---|---|
static interface |
DruidStorageHandlerUtils.DataPusher
Simple interface for retry operations
|
Modifier and Type | Field and Description |
---|---|
static com.google.common.collect.Interner<io.druid.timeline.DataSegment> |
DATA_SEGMENT_INTERNER
Generic Interner implementation used to read segment objects from metadata storage
|
static org.joda.time.Interval |
DEFAULT_INTERVAL |
static String |
DEFAULT_TIMESTAMP_COLUMN |
static String |
DESCRIPTOR_JSON |
static String |
EVENT_TIMESTAMP_COLUMN |
static io.druid.segment.IndexIO |
INDEX_IO
Used by druid to perform IO on indexes
|
static io.druid.segment.IndexMergerV9 |
INDEX_MERGER_V9
Used by druid to merge indexes
|
static String |
INDEX_ZIP |
static com.fasterxml.jackson.databind.ObjectMapper |
JSON_MAPPER
Mapper to use to serialize/deserialize Druid objects (JSON)
|
static com.fasterxml.jackson.databind.ObjectMapper |
SMILE_MAPPER
Mapper to use to serialize/deserialize Druid objects (SMILE)
|
Constructor and Description |
---|
DruidStorageHandlerUtils() |
Modifier and Type | Method and Description |
---|---|
static void |
addDependencyJars(org.apache.hadoop.conf.Configuration conf,
Class<?>... classes) |
static String |
createScanAllQuery(String dataSourceName) |
static io.druid.segment.loading.DataSegmentPusher |
createSegmentPusherForDirectory(String segmentDirectory,
org.apache.hadoop.conf.Configuration configuration) |
static com.metamx.http.client.Request |
createSmileRequest(String address,
io.druid.query.Query query)
Method that creates a request for Druid query using SMILE format.
|
static boolean |
disableDataSource(io.druid.metadata.SQLMetadataConnector connector,
io.druid.metadata.MetadataStorageTablesConfig metadataStorageTablesConfig,
String dataSource) |
static void |
disableDataSourceWithHandle(org.skife.jdbi.v2.Handle handle,
io.druid.metadata.MetadataStorageTablesConfig metadataStorageTablesConfig,
String dataSource) |
static Collection<String> |
getAllDataSourceNames(io.druid.metadata.SQLMetadataConnector connector,
io.druid.metadata.MetadataStorageTablesConfig metadataStorageTablesConfig) |
static List<io.druid.timeline.DataSegment> |
getCreatedSegments(org.apache.hadoop.fs.Path taskDir,
org.apache.hadoop.conf.Configuration conf) |
static List<io.druid.timeline.DataSegment> |
getDataSegmentList(io.druid.metadata.SQLMetadataConnector connector,
io.druid.metadata.MetadataStorageTablesConfig metadataStorageTablesConfig,
String dataSource) |
static io.druid.java.util.common.Pair<List<io.druid.data.input.impl.DimensionSchema>,io.druid.query.aggregation.AggregatorFactory[]> |
getDimensionsAndAggregates(org.apache.hadoop.conf.Configuration jc,
List<String> columnNames,
List<TypeInfo> columnTypes) |
static io.druid.segment.indexing.granularity.GranularitySpec |
getGranularitySpec(org.apache.hadoop.conf.Configuration configuration,
Properties tableProperties) |
static io.druid.segment.IndexSpec |
getIndexSpec(org.apache.hadoop.conf.Configuration jc) |
static org.apache.hadoop.fs.Path |
getPath(io.druid.timeline.DataSegment dataSegment) |
static String |
getURL(com.metamx.http.client.HttpClient client,
URL url) |
static org.apache.hadoop.fs.Path |
makeSegmentDescriptorOutputPath(io.druid.timeline.DataSegment pushedSegment,
org.apache.hadoop.fs.Path segmentsDescriptorDir) |
static List<io.druid.timeline.DataSegment> |
publishSegmentsAndCommit(io.druid.metadata.SQLMetadataConnector connector,
io.druid.metadata.MetadataStorageTablesConfig metadataStorageTablesConfig,
String dataSource,
List<io.druid.timeline.DataSegment> segments,
boolean overwrite,
org.apache.hadoop.conf.Configuration conf,
io.druid.segment.loading.DataSegmentPusher dataSegmentPusher)
First computes the segments timeline to accommodate new segments for insert into case
Then moves segments to druid deep storage with updated metadata/version
ALL IS DONE IN ONE TRANSACTION
|
static io.druid.timeline.DataSegment |
publishSegmentWithShardSpec(io.druid.timeline.DataSegment segment,
io.druid.timeline.partition.ShardSpec shardSpec,
String version,
org.apache.hadoop.fs.FileSystem fs,
io.druid.segment.loading.DataSegmentPusher dataSegmentPusher) |
static InputStream |
submitRequest(com.metamx.http.client.HttpClient client,
com.metamx.http.client.Request request)
Method that submits a request to an Http address and retrieves the result.
|
static void |
writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem outputFS,
io.druid.timeline.DataSegment segment,
org.apache.hadoop.fs.Path descriptorPath)
This function will write a serialized form of the segment descriptor to the filesystem;
if an existing file is present it will try to replace it.
|
public static final String DEFAULT_TIMESTAMP_COLUMN
public static final String EVENT_TIMESTAMP_COLUMN
public static final String INDEX_ZIP
public static final String DESCRIPTOR_JSON
public static final org.joda.time.Interval DEFAULT_INTERVAL
public static final com.fasterxml.jackson.databind.ObjectMapper JSON_MAPPER
public static final com.fasterxml.jackson.databind.ObjectMapper SMILE_MAPPER
public static final io.druid.segment.IndexIO INDEX_IO
public static final io.druid.segment.IndexMergerV9 INDEX_MERGER_V9
public static final com.google.common.collect.Interner<io.druid.timeline.DataSegment> DATA_SEGMENT_INTERNER
public static com.metamx.http.client.Request createSmileRequest(String address, io.druid.query.Query query) throws IOException
address
- query
- IOException
public static InputStream submitRequest(com.metamx.http.client.HttpClient client, com.metamx.http.client.Request request) throws IOException
client
- request
- IOException
public static String getURL(com.metamx.http.client.HttpClient client, URL url) throws IOException
IOException
public static List<io.druid.timeline.DataSegment> getCreatedSegments(org.apache.hadoop.fs.Path taskDir, org.apache.hadoop.conf.Configuration conf) throws IOException
taskDir
- path to the directory containing the segments descriptor info;
the descriptor path will be .../workingPath/task_id/DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME/*.json
conf
- hadoop conf to get the file system
IOException
- can be thrown for the case where we did not produce data.
public static void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem outputFS, io.druid.timeline.DataSegment segment, org.apache.hadoop.fs.Path descriptorPath) throws IOException
outputFS
- filesystem
segment
- DataSegment object
descriptorPath
- path
IOException
public static Collection<String> getAllDataSourceNames(io.druid.metadata.SQLMetadataConnector connector, io.druid.metadata.MetadataStorageTablesConfig metadataStorageTablesConfig)
connector
- SQL metadata connector to the metadata storage
metadataStorageTablesConfig
- Table config
public static boolean disableDataSource(io.druid.metadata.SQLMetadataConnector connector, io.druid.metadata.MetadataStorageTablesConfig metadataStorageTablesConfig, String dataSource)
connector
- SQL connector to metadata
metadataStorageTablesConfig
- Tables configuration
dataSource
- Name of data source
public static List<io.druid.timeline.DataSegment> publishSegmentsAndCommit(io.druid.metadata.SQLMetadataConnector connector, io.druid.metadata.MetadataStorageTablesConfig metadataStorageTablesConfig, String dataSource, List<io.druid.timeline.DataSegment> segments, boolean overwrite, org.apache.hadoop.conf.Configuration conf, io.druid.segment.loading.DataSegmentPusher dataSegmentPusher) throws org.skife.jdbi.v2.exceptions.CallbackFailedException
connector
- DBI connector to commit
metadataStorageTablesConfig
- Druid metadata tables definitions
dataSource
- Druid datasource name
segments
- List of segments to move and commit to metadata
overwrite
- if it is an insert overwrite
conf
- Configuration
dataSegmentPusher
- segment pusher
org.skife.jdbi.v2.exceptions.CallbackFailedException
public static void disableDataSourceWithHandle(org.skife.jdbi.v2.Handle handle, io.druid.metadata.MetadataStorageTablesConfig metadataStorageTablesConfig, String dataSource)
public static List<io.druid.timeline.DataSegment> getDataSegmentList(io.druid.metadata.SQLMetadataConnector connector, io.druid.metadata.MetadataStorageTablesConfig metadataStorageTablesConfig, String dataSource)
connector
- SQL connector to metadata
metadataStorageTablesConfig
- Tables configuration
dataSource
- Name of data source
public static org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(io.druid.timeline.DataSegment pushedSegment, org.apache.hadoop.fs.Path segmentsDescriptorDir)
pushedSegment
- the pushed data segment
segmentsDescriptorDir
- directory containing the segment descriptors
public static String createScanAllQuery(String dataSourceName) throws com.fasterxml.jackson.core.JsonProcessingException
com.fasterxml.jackson.core.JsonProcessingException
public static void addDependencyJars(org.apache.hadoop.conf.Configuration conf, Class<?>... classes) throws IOException
IOException
public static io.druid.segment.loading.DataSegmentPusher createSegmentPusherForDirectory(String segmentDirectory, org.apache.hadoop.conf.Configuration configuration) throws IOException
IOException
public static io.druid.timeline.DataSegment publishSegmentWithShardSpec(io.druid.timeline.DataSegment segment, io.druid.timeline.partition.ShardSpec shardSpec, String version, org.apache.hadoop.fs.FileSystem fs, io.druid.segment.loading.DataSegmentPusher dataSegmentPusher) throws IOException
IOException
public static org.apache.hadoop.fs.Path getPath(io.druid.timeline.DataSegment dataSegment)
public static io.druid.segment.indexing.granularity.GranularitySpec getGranularitySpec(org.apache.hadoop.conf.Configuration configuration, Properties tableProperties)
public static io.druid.segment.IndexSpec getIndexSpec(org.apache.hadoop.conf.Configuration jc)
Copyright © 2022 The Apache Software Foundation. All rights reserved.