[HUDI-3760] Adding capability to fetch Metadata Records by prefix (#5208)
- Adding the capability to fetch Metadata Table records by key prefix, so that Data Skipping can fetch only the Column Stats Index records pertaining to the columns being queried, instead of reading out the whole index.
- Fixed usages of HFileScanner in HFileReader: a few code paths reuse the cached scanner when available, while the other code paths use their own HFileScanner with positional reads.

Brief change log
- Rebasing ColumnStatsIndexSupport to rely on HoodieBackedTableMetadata instead of reading through the Spark DataSource
- Adding methods enabling key-prefix lookups to HoodieFileReader and HoodieHFileReader
- Wiring key-prefix lookups through the LogRecordScanner implementations
- Cleaning up the HoodieHFileReader implementation

Co-authored-by: sivabalan <n.siva.b@gmail.com>
Co-authored-by: Sagar Sumit <sagarsumit09@gmail.com>
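For orientation, here is a minimal sketch of the lookup path this change enables, assuming the APIs exercised in the diff below (`HoodieTableMetadata.create`, `ColumnIndexID`, `getRecordsByKeyPrefixes`, `HoodieJavaRDD.getJavaRDD`); the `fetchColumnStatsRecords` helper itself is purely illustrative and not part of this change:

```scala
import scala.collection.JavaConverters._

import org.apache.hudi.client.common.HoodieSparkEngineContext
import org.apache.hudi.common.config.HoodieMetadataConfig
import org.apache.hudi.common.model.HoodieRecord
import org.apache.hudi.common.table.view.FileSystemViewStorageConfig
import org.apache.hudi.common.util.hash.ColumnIndexID
import org.apache.hudi.data.HoodieJavaRDD
import org.apache.hudi.metadata.{HoodieMetadataPayload, HoodieTableMetadata, HoodieTableMetadataUtil}
import org.apache.spark.api.java.JavaSparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

// Illustrative helper (not part of this change): fetch only the Column Stats Index
// records pertaining to the queried columns, instead of scanning the whole index.
def fetchColumnStatsRecords(spark: SparkSession,
                            tableBasePath: String,
                            targetColumns: Seq[String]): RDD[HoodieRecord[HoodieMetadataPayload]] = {
  val ctx = new HoodieSparkEngineContext(new JavaSparkContext(spark.sparkContext))
  val metadataConfig = HoodieMetadataConfig.newBuilder().enable(true).build()
  val metadataTable = HoodieTableMetadata.create(
    ctx, metadataConfig, tableBasePath, FileSystemViewStorageConfig.SPILLABLE_DIR.defaultValue)

  // Column Stats Index record keys are prefixed with the Base64-encoded column-name hash,
  // so encoding the target column names yields the key prefixes to look up.
  val keyPrefixes = targetColumns.map(colName => new ColumnIndexID(colName).asBase64EncodedString())

  HoodieJavaRDD.getJavaRDD(
    metadataTable.getRecordsByKeyPrefixes(keyPrefixes.asJava, HoodieTableMetadataUtil.PARTITION_NAME_COLUMN_STATS))
}
```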
@@ -17,22 +17,39 @@
package org.apache.hudi

import org.apache.hudi.ColumnStatsIndexSupport.{composeIndexSchema, deserialize, tryUnpackNonNullVal}
import org.apache.hudi.metadata.{HoodieMetadataPayload, MetadataPartitionType}
import org.apache.avro.Schema.Parser
import org.apache.avro.generic.GenericRecord
import org.apache.hudi.ColumnStatsIndexSupport.{composeIndexSchema, deserialize, metadataRecordSchemaString, metadataRecordStructType, tryUnpackNonNullVal}
import org.apache.hudi.HoodieConversionUtils.toScalaOption
import org.apache.hudi.avro.model.HoodieMetadataRecord
import org.apache.hudi.client.common.HoodieSparkEngineContext
import org.apache.hudi.common.config.HoodieMetadataConfig
import org.apache.hudi.common.model.HoodieRecord
import org.apache.hudi.common.table.view.FileSystemViewStorageConfig
import org.apache.hudi.common.util.hash.ColumnIndexID
import org.apache.hudi.data.HoodieJavaRDD
import org.apache.hudi.metadata.{HoodieMetadataPayload, HoodieTableMetadata, HoodieTableMetadataUtil, MetadataPartitionType}
import org.apache.spark.api.java.JavaSparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.{DataFrame, HoodieUnsafeRDDUtils, Row, SparkSession}

import scala.collection.JavaConverters._
import scala.collection.immutable.TreeSet

/**
* Mixin trait abstracting away heavy-lifting of interactions with Metadata Table's Column Stats Index,
* providing convenient interfaces to read it, transpose, etc
*/
trait ColumnStatsIndexSupport {
trait ColumnStatsIndexSupport extends SparkAdapterSupport {

def readColumnStatsIndex(spark: SparkSession, metadataTablePath: String): DataFrame = {
def readColumnStatsIndex(spark: SparkSession,
tableBasePath: String,
metadataConfig: HoodieMetadataConfig,
targetColumns: Seq[String] = Seq.empty): DataFrame = {
val targetColStatsIndexColumns = Seq(
HoodieMetadataPayload.COLUMN_STATS_FIELD_FILE_NAME,
HoodieMetadataPayload.COLUMN_STATS_FIELD_MIN_VALUE,
@@ -43,11 +60,17 @@ trait ColumnStatsIndexSupport {
(targetColStatsIndexColumns :+ HoodieMetadataPayload.COLUMN_STATS_FIELD_COLUMN_NAME).map(colName =>
s"${HoodieMetadataPayload.SCHEMA_FIELD_ID_COLUMN_STATS}.${colName}")

// Read Metadata Table's Column Stats Index into Spark's [[DataFrame]]
val metadataTableDF = spark.read.format("org.apache.hudi")
.load(s"$metadataTablePath/${MetadataPartitionType.COLUMN_STATS.getPartitionPath}")
val metadataTableDF: DataFrame = {
// NOTE: If specific columns have been provided, we can considerably trim down amount of data fetched
// by only fetching Column Stats Index records pertaining to the requested columns.
// Otherwise we fallback to read whole Column Stats Index
if (targetColumns.nonEmpty) {
readColumnStatsIndexForColumnsInternal(spark, targetColumns, metadataConfig, tableBasePath)
} else {
readFullColumnStatsIndexInternal(spark, tableBasePath)
}
}

// TODO filter on (column, partition) prefix
val colStatsDF = metadataTableDF.where(col(HoodieMetadataPayload.SCHEMA_FIELD_ID_COLUMN_STATS).isNotNull)
.select(requiredMetadataIndexColumns.map(col): _*)

@@ -105,34 +128,40 @@ trait ColumnStatsIndexSupport {
// of the transposed table
val sortedColumns = TreeSet(targetColumns: _*)

val transposedRDD = colStatsDF.rdd
.filter(row => sortedColumns.contains(row.getString(colStatsSchemaOrdinalsMap("columnName"))))
.map { row =>
val (minValue, _) = tryUnpackNonNullVal(row.getAs[Row](colStatsSchemaOrdinalsMap("minValue")))
val (maxValue, _) = tryUnpackNonNullVal(row.getAs[Row](colStatsSchemaOrdinalsMap("maxValue")))
val colNameOrdinal = colStatsSchemaOrdinalsMap(HoodieMetadataPayload.COLUMN_STATS_FIELD_COLUMN_NAME)
val minValueOrdinal = colStatsSchemaOrdinalsMap(HoodieMetadataPayload.COLUMN_STATS_FIELD_MIN_VALUE)
val maxValueOrdinal = colStatsSchemaOrdinalsMap(HoodieMetadataPayload.COLUMN_STATS_FIELD_MAX_VALUE)
val fileNameOrdinal = colStatsSchemaOrdinalsMap(HoodieMetadataPayload.COLUMN_STATS_FIELD_FILE_NAME)
val nullCountOrdinal = colStatsSchemaOrdinalsMap(HoodieMetadataPayload.COLUMN_STATS_FIELD_NULL_COUNT)

val colName = row.getString(colStatsSchemaOrdinalsMap("columnName"))
val transposedRDD = colStatsDF.rdd
.filter(row => sortedColumns.contains(row.getString(colNameOrdinal)))
.map { row =>
val (minValue, _) = tryUnpackNonNullVal(row.getAs[Row](minValueOrdinal))
val (maxValue, _) = tryUnpackNonNullVal(row.getAs[Row](maxValueOrdinal))

val colName = row.getString(colNameOrdinal)
val colType = tableSchemaFieldMap(colName).dataType

val rowValsSeq = row.toSeq.toArray

rowValsSeq(colStatsSchemaOrdinalsMap("minValue")) = deserialize(minValue, colType)
rowValsSeq(colStatsSchemaOrdinalsMap("maxValue")) = deserialize(maxValue, colType)
rowValsSeq(minValueOrdinal) = deserialize(minValue, colType)
rowValsSeq(maxValueOrdinal) = deserialize(maxValue, colType)

Row(rowValsSeq:_*)
}
.groupBy(r => r.getString(colStatsSchemaOrdinalsMap("fileName")))
.groupBy(r => r.getString(fileNameOrdinal))
.foldByKey(Seq[Row]()) {
case (_, columnRows) =>
// Rows seq is always non-empty (otherwise it won't be grouped into)
val fileName = columnRows.head.get(colStatsSchemaOrdinalsMap("fileName"))
val fileName = columnRows.head.get(fileNameOrdinal)
val coalescedRowValuesSeq = columnRows.toSeq
// NOTE: It's crucial to maintain appropriate ordering of the columns
// matching table layout
.sortBy(_.getString(colStatsSchemaOrdinalsMap("columnName")))
.sortBy(_.getString(colNameOrdinal))
.foldLeft(Seq[Any](fileName)) {
case (acc, columnRow) =>
acc ++ Seq("minValue", "maxValue", "nullCount").map(ord => columnRow.get(colStatsSchemaOrdinalsMap(ord)))
acc ++ Seq(minValueOrdinal, maxValueOrdinal, nullCountOrdinal).map(ord => columnRow.get(ord))
}

Seq(Row(coalescedRowValuesSeq:_*))
@@ -147,6 +176,49 @@ trait ColumnStatsIndexSupport {
spark.createDataFrame(transposedRDD, indexSchema)
}

private def readFullColumnStatsIndexInternal(spark: SparkSession, tableBasePath: String) = {
val metadataTablePath = HoodieTableMetadata.getMetadataTableBasePath(tableBasePath)
// Read Metadata Table's Column Stats Index into Spark's [[DataFrame]]
spark.read.format("org.apache.hudi")
.load(s"$metadataTablePath/${MetadataPartitionType.COLUMN_STATS.getPartitionPath}")
}

private def readColumnStatsIndexForColumnsInternal(spark: SparkSession, targetColumns: Seq[String], metadataConfig: HoodieMetadataConfig, tableBasePath: String) = {
val ctx = new HoodieSparkEngineContext(new JavaSparkContext(spark.sparkContext))

// Read Metadata Table's Column Stats Index into Spark's [[DataFrame]] by
// - Fetching the records from CSI by key-prefixes (encoded column names)
// - Deserializing fetched records into [[InternalRow]]s
// - Composing [[DataFrame]]
val metadataTableDF = {
val metadataTable = HoodieTableMetadata.create(ctx, metadataConfig, tableBasePath, FileSystemViewStorageConfig.SPILLABLE_DIR.defaultValue)

// TODO encoding should be done internally w/in HoodieBackedTableMetadata
val encodedTargetColumnNames = targetColumns.map(colName => new ColumnIndexID(colName).asBase64EncodedString())

val recordsRDD: RDD[HoodieRecord[HoodieMetadataPayload]] =
HoodieJavaRDD.getJavaRDD(
metadataTable.getRecordsByKeyPrefixes(encodedTargetColumnNames.asJava, HoodieTableMetadataUtil.PARTITION_NAME_COLUMN_STATS)
)

val catalystRowsRDD: RDD[InternalRow] = recordsRDD.mapPartitions { it =>
val metadataRecordSchema = new Parser().parse(metadataRecordSchemaString)
val converter = AvroConversionUtils.createAvroToInternalRowConverter(metadataRecordSchema, metadataRecordStructType)

it.map { record =>
// schema and props are ignored for generating metadata record from the payload
// instead, the underlying file system, or bloom filter, or columns stats metadata (part of payload) are directly used
toScalaOption(record.getData.getInsertValue(null, null))
.flatMap(avroRecord => converter(avroRecord.asInstanceOf[GenericRecord]))
.orNull
}
}

HoodieUnsafeRDDUtils.createDataFrame(spark, catalystRowsRDD, metadataRecordStructType)
}
metadataTableDF
}
}

object ColumnStatsIndexSupport {
@@ -156,6 +228,9 @@ object ColumnStatsIndexSupport {
private val COLUMN_STATS_INDEX_MAX_VALUE_STAT_NAME = "maxValue"
private val COLUMN_STATS_INDEX_NUM_NULLS_STAT_NAME = "num_nulls"

private val metadataRecordSchemaString: String = HoodieMetadataRecord.SCHEMA$.toString
private val metadataRecordStructType: StructType = AvroConversionUtils.convertAvroSchemaToStructType(HoodieMetadataRecord.SCHEMA$)

/**
* @VisibleForTesting
*/

@@ -27,7 +27,7 @@ import org.apache.hadoop.mapred.JobConf
import org.apache.hudi.HoodieBaseRelation.getPartitionPath
import org.apache.hudi.HoodieConversionUtils.toScalaOption
import org.apache.hudi.common.config.SerializableConfiguration
import org.apache.hudi.common.config.{HoodieMetadataConfig, SerializableConfiguration}
import org.apache.hudi.common.fs.FSUtils
import org.apache.hudi.common.model.{HoodieFileFormat, HoodieRecord}
import org.apache.hudi.common.table.timeline.{HoodieInstant, HoodieTimeline}
@@ -68,7 +68,8 @@ case class HoodieTableState(tablePath: String,
recordKeyField: String,
preCombineFieldOpt: Option[String],
usesVirtualKeys: Boolean,
recordPayloadClassName: String)
recordPayloadClassName: String,
metadataConfig: HoodieMetadataConfig)

/**
* Hoodie BaseRelation which extends [[PrunedFilteredScan]].
@@ -135,7 +136,7 @@ abstract class HoodieBaseRelation(val sqlContext: SQLContext,
val internalSchemaFromMeta = try {
schemaUtil.getTableInternalSchemaFromCommitMetadata.orElse(InternalSchema.getEmptyInternalSchema)
} catch {
case _ => InternalSchema.getEmptyInternalSchema
case _: Exception => InternalSchema.getEmptyInternalSchema
}
(avroSchema, internalSchemaFromMeta)
}
@@ -339,7 +340,8 @@ abstract class HoodieBaseRelation(val sqlContext: SQLContext,
recordKeyField = recordKeyField,
preCombineFieldOpt = preCombineFieldOpt,
usesVirtualKeys = !tableConfig.populateMetaFields(),
recordPayloadClassName = tableConfig.getPayloadClass
recordPayloadClassName = tableConfig.getPayloadClass,
metadataConfig = fileIndex.metadataConfig
)
}

@@ -26,7 +26,7 @@ import org.apache.hudi.common.util.StringUtils
import org.apache.hudi.exception.HoodieException
import org.apache.hudi.keygen.constant.KeyGeneratorOptions
import org.apache.hudi.keygen.{TimestampBasedAvroKeyGenerator, TimestampBasedKeyGenerator}
import org.apache.hudi.metadata.{HoodieMetadataPayload, HoodieTableMetadata}
import org.apache.hudi.metadata.{HoodieMetadataPayload, HoodieTableMetadata, HoodieTableMetadataUtil, MetadataPartitionType}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{And, Expression, Literal}
@@ -195,15 +195,14 @@ case class HoodieFileIndex(spark: SparkSession,
* @return list of pruned (data-skipped) candidate base-files' names
*/
private def lookupCandidateFilesInMetadataTable(queryFilters: Seq[Expression]): Try[Option[Set[String]]] = Try {
val fs = metaClient.getFs
val metadataTablePath = HoodieTableMetadata.getMetadataTableBasePath(basePath)

if (!isDataSkippingEnabled || !fs.exists(new Path(metadataTablePath)) || queryFilters.isEmpty) {
if (!isDataSkippingEnabled || queryFilters.isEmpty || !HoodieTableMetadataUtil.getCompletedMetadataPartitions(metaClient.getTableConfig)
.contains(HoodieTableMetadataUtil.PARTITION_NAME_COLUMN_STATS)) {
Option.empty
} else {
val colStatsDF: DataFrame = readColumnStatsIndex(spark, metadataTablePath)
val queryReferencedColumns = collectReferencedColumns(spark, queryFilters, schema)

val colStatsDF: DataFrame = readColumnStatsIndex(spark, basePath, metadataConfig, queryReferencedColumns)

// Persist DF to avoid re-computing column statistics unraveling
withPersistence(colStatsDF) {
val transposedColStatsDF: DataFrame = transposeColumnStatsIndex(spark, colStatsDF, queryReferencedColumns, schema)

@@ -23,7 +23,7 @@ import org.apache.avro.generic.{GenericRecord, GenericRecordBuilder, IndexedReco
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.JobConf
import org.apache.hudi.HoodieConversionUtils.toScalaOption
import org.apache.hudi.HoodieConversionUtils.{toJavaOption, toScalaOption}
import org.apache.hudi.HoodieMergeOnReadRDD.{AvroDeserializerSupport, collectFieldOrdinals, getPartitionPath, projectAvro, projectAvroUnsafe, projectRowUnsafe, resolveAvroSchemaNullability}
import org.apache.hudi.MergeOnReadSnapshotRelation.getFilePath
import org.apache.hudi.common.config.HoodieMetadataConfig
@@ -324,17 +324,23 @@ private object HoodieMergeOnReadRDD {
val fs = FSUtils.getFs(tablePath, hadoopConf)

if (HoodieTableMetadata.isMetadataTable(tablePath)) {
val metadataConfig = HoodieMetadataConfig.newBuilder().enable(true).build()
val metadataConfig = tableState.metadataConfig
val dataTableBasePath = getDataTableBasePathFromMetadataTable(tablePath)
val metadataTable = new HoodieBackedTableMetadata(
new HoodieLocalEngineContext(hadoopConf), metadataConfig,
dataTableBasePath,
hadoopConf.get(HoodieRealtimeConfig.SPILLABLE_MAP_BASE_PATH_PROP, HoodieRealtimeConfig.DEFAULT_SPILLABLE_MAP_BASE_PATH))

// We have to force full-scan for the MT log record reader, to make sure
// we can iterate over all of the partitions, since by default some of the partitions (Column Stats,
// Bloom Filter) are in "point-lookup" mode
val forceFullScan = true

// NOTE: In case of Metadata Table partition path equates to partition name (since there's just one level
// of indirection among MT partitions)
val relativePartitionPath = getRelativePartitionPath(new Path(tablePath), partitionPath)
metadataTable.getLogRecordScanner(logFiles.asJava, relativePartitionPath).getLeft
metadataTable.getLogRecordScanner(logFiles.asJava, relativePartitionPath, toJavaOption(Some(forceFullScan)))
.getLeft
} else {
val logRecordScannerBuilder = HoodieMergedLogRecordScanner.newBuilder()
.withFileSystem(fs)

@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hudi.DataSourceWriteOptions._
import org.apache.hudi.HoodieConversionUtils.toProperties
import org.apache.hudi.HoodieWriterUtils._
import org.apache.hudi.avro.HoodieAvroUtils
import org.apache.hudi.client.{HoodieWriteResult, SparkRDDWriteClient}
@@ -360,7 +361,7 @@ object HoodieSparkSqlWriter {
None
}
} catch {
case _ => None
case _: Exception => None
}
}

@@ -568,12 +569,6 @@ object HoodieSparkSqlWriter {
(syncHiveSuccess, common.util.Option.ofNullable(instantTime))
}

def toProperties(params: Map[String, String]): TypedProperties = {
val props = new TypedProperties()
params.foreach(kv => props.setProperty(kv._1, kv._2))
props
}

private def handleSaveModes(spark: SparkSession, mode: SaveMode, tablePath: Path, tableConfig: HoodieTableConfig, tableName: String,
operation: WriteOperationType, fs: FileSystem): Unit = {
if (mode == SaveMode.Append && tableExists) {

@@ -91,12 +91,6 @@ object HoodieWriterUtils {
Map() ++ hoodieConfig.getProps.asScala ++ globalProps ++ DataSourceOptionsHelper.translateConfigurations(parameters)
}

def toProperties(params: Map[String, String]): TypedProperties = {
val props = new TypedProperties()
params.foreach(kv => props.setProperty(kv._1, kv._2))
props
}

/**
* Get the partition columns to stored to hoodie.properties.
* @param parameters

@@ -308,7 +308,7 @@ object SparkHoodieTableFileIndex {
}

private def deduceQueryType(configProperties: TypedProperties): HoodieTableQueryType = {
configProperties.asScala(QUERY_TYPE.key) match {
configProperties.asScala.getOrElse(QUERY_TYPE.key, QUERY_TYPE.defaultValue) match {
case QUERY_TYPE_SNAPSHOT_OPT_VAL => HoodieTableQueryType.SNAPSHOT
case QUERY_TYPE_INCREMENTAL_OPT_VAL => HoodieTableQueryType.INCREMENTAL
case QUERY_TYPE_READ_OPTIMIZED_OPT_VAL => HoodieTableQueryType.READ_OPTIMIZED

@@ -154,6 +154,7 @@ class HoodieStreamSource(
} else {
// Consume the data between (startCommitTime, endCommitTime]
val incParams = parameters ++ Map(
DataSourceReadOptions.QUERY_TYPE.key -> DataSourceReadOptions.QUERY_TYPE_INCREMENTAL_OPT_VAL,
DataSourceReadOptions.BEGIN_INSTANTTIME.key -> startCommitTime(startOffset),
DataSourceReadOptions.END_INSTANTTIME.key -> endOffset.commitTime
)