diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000..cb7565a --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,4 @@ +#!/usr/bin/env groovy +common { + slackChannel = '#connect-eng' +} diff --git a/NOTICE b/NOTICE index 37aa9de..929fdfc 100644 --- a/NOTICE +++ b/NOTICE @@ -285,8 +285,8 @@ The following libraries are included in packaged versions of this project: * Pentaho aggdesigner * COPYRIGHT: Copyright 2006 - 2013 Pentaho Corporation - * LICENSE: licenses/LICENSE.gpl2.txt - * HOMEPAGE: https://github.com/pentaho/pentaho-aggdesigner + * LICENSE: licenses/LICENSE.apache2.txt + * HOMEPAGE: https://github.com/julianhyde/aggdesigner/tree/master/pentaho-aggdesigner-algorithm * SLF4J * COPYRIGHT: Copyright (c) 2004-2013 QOS.ch diff --git a/docs/changelog.rst b/docs/changelog.rst index 64a88b5..f7d3295 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -3,6 +3,29 @@ Changelog ========= +Version 3.3.0 +------------- + +* `PR-187 `_ - CC-491: Consolidate and simplify unit tests of HDFS connector. +* `PR-205 `_ - Upgrade avro to 1.8.2. + +Version 3.2.2 +------------- + +* `PR-194 `_ - Fix HdfsSinkConnector to extend from SinkConnector instead of Connector. +* `PR-200 `_ - Fix incorrect licensing and webpage info. + +Version 3.2.1 +------------- +No changes + +Version 3.2.0 +------------- + +* `PR-135 `_ - Fix typos +* `PR-164 `_ - Issue 136 - Support topic with dots in hive. +* `PR-170 `_ - MINOR: Upgrade Hadoop version to 2.7.3 and joda-time to 2.9.7 + Version 3.1.1 ------------- No changes diff --git a/docs/conf.py b/docs/conf.py index 3518db9..2604c1a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -57,9 +57,9 @@ def setup(app): # built documents. # # The short X.Y version. -version = '3.0' +version = '3.3' # The full version, including alpha/beta/rc tags. -release = '3.1.3-SNAPSHOT' +release = '3.3.0-hotfix.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/hdfs_connector.rst b/docs/hdfs_connector.rst index 951691d..8d42fe2 100644 --- a/docs/hdfs_connector.rst +++ b/docs/hdfs_connector.rst @@ -5,8 +5,8 @@ The HDFS connector allows you to export data from Kafka topics to HDFS files in and integrates with Hive to make data immediately available for querying with HiveQL. The connector periodically polls data from Kafka and writes them to HDFS. The data from each Kafka -topic is partitioned by the provided partitioner and divided into chucks. Each chunk of data is -represented as an HDFS file with topic, kafka partition, start and end offsets of this data chuck +topic is partitioned by the provided partitioner and divided into chunks. Each chunk of data is +represented as an HDFS file with topic, kafka partition, start and end offsets of this data chunk in the filename. If no partitioner is specified in the configuration, the default partitioner which preserves the Kafka partitioning is used. The size of each data chunk is determined by the number of records written to HDFS, the time written to HDFS and schema compatibility. @@ -20,9 +20,8 @@ Quickstart In this Quickstart, we use the HDFS connector to export data produced by the Avro console producer to HDFS. -Start Zookeeper, Kafka and SchemaRegistry if you haven't done so. The instructions on how to start -these services are available at the Confluent Platform QuickStart. You also need to have Hadoop -running locally or remotely and make sure that you know the HDFS url. 
For Hive integration, you +Before you start the Confluent services, make sure Hadoop is +running locally or remotely and that you know the HDFS url. For Hive integration, you need to have Hive installed and to know the metastore thrift uri. This Quickstart assumes that you started the required services with the default configurations and @@ -39,12 +38,41 @@ Also, this Quickstart assumes that security is not configured for HDFS and Hive please make the necessary configurations change following `Secure HDFS and Hive Metastore`_ section. -First, start the Avro console producer:: +First, start all the necessary services using Confluent CLI. + +.. tip:: + + If not already in your PATH, add Confluent's ``bin`` directory by running: ``export PATH=/bin:$PATH`` + +.. sourcecode:: bash + + $ confluent start + +Every service will start in order, printing a message with its status: + +.. sourcecode:: bash + + Starting zookeeper + zookeeper is [UP] + Starting kafka + kafka is [UP] + Starting schema-registry + schema-registry is [UP] + Starting kafka-rest + kafka-rest is [UP] + Starting connect + connect is [UP] + +Next, start the Avro console producer to import a few records to Kafka: + +.. sourcecode:: bash $ ./bin/kafka-avro-console-producer --broker-list localhost:9092 --topic test_hdfs \ --property value.schema='{"type":"record","name":"myrecord","fields":[{"name":"f1","type":"string"}]}' -Then in the console producer, type in:: +Then in the console producer, type in: + +.. sourcecode:: bash {"f1": "value1"} {"f1": "value2"} @@ -54,42 +82,102 @@ The three records entered are published to the Kafka topic ``test_hdfs`` in Avro Before starting the connector, please make sure that the configurations in ``etc/kafka-connect-hdfs/quickstart-hdfs.properties`` are properly set to your configurations of -Hadoop, e.g. ``hdfs.url`` points to the proper HDFS and using FQDN in the host. Then run the -following command to start Kafka connect with the HDFS connector:: +Hadoop, e.g. ``hdfs.url`` points to the proper HDFS and using FQDN in the host. Then start connector by loading its +configuration with the following command: + +.. sourcecode:: bash + $ confluent load hdfs-sink -d etc/kafka-connect-hdfs/quickstart-hdfs.properties + { + "name": "hdfs-sink", + "config": { + "connector.class": "io.confluent.connect.hdfs.HdfsSinkConnector", + "tasks.max": "1", + "topics": "test_hdfs", + "hdfs.url": "hdfs://localhost:9000", + "flush.size": "3", + "name": "hdfs-sink" + }, + "tasks": [] + } - $ ./bin/connect-standalone etc/schema-registry/connect-avro-standalone.properties \ - etc/kafka-connect-hdfs/quickstart-hdfs.properties +To check that the connector started successfully view the Connect worker's log by running: -You should see that the process starts up and logs some messages, and then exports data from Kafka -to HDFS. Once the connector finishes ingesting data to HDFS, check that the data is available -in HDFS:: +.. sourcecode:: bash + + $ confluent log connect + +Towards the end of the log you should see that the connector starts, logs a few messages, and then exports +data from Kafka to HDFS. +Once the connector finishes ingesting data to HDFS, check that the data is available in HDFS: + +.. sourcecode:: bash $ hadoop fs -ls /topics/test_hdfs/partition=0 You should see a file with name ``/topics/test_hdfs/partition=0/test_hdfs+0+0000000000+0000000002.avro`` The file name is encoded as ``topic+kafkaPartition+startOffset+endOffset.format``. 
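The start and end offsets in the file name are inclusive, so this file covers offsets 0 through 2, i.e. the three records produced above (matching the quickstart's ``flush.size`` of 3). As a quick illustration (not part of the connector's tooling), the encoded name can be decomposed with standard shell utilities:

.. sourcecode:: bash

    $ f=test_hdfs+0+0000000000+0000000002.avro
    $ IFS='+' read -r topic partition start end <<< "${f%.avro}"
    $ echo "topic=$topic partition=$partition records=$((10#$end - 10#$start + 1))"
    topic=test_hdfs partition=0 records=3

The ``10#`` prefix forces base-10 arithmetic so the zero-padded offsets are not interpreted as octal.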
-You can use ``avro-tools-1.7.7.jar`` -(available in `Apache mirrors `_) -to extract the content of the file. Run ``avro-tools`` directly on Hadoop as:: +You can use ``avro-tools-1.8.2.jar`` +(available in `Apache mirrors `_) +to extract the content of the file. Run ``avro-tools`` directly on Hadoop as: + +.. sourcecode:: bash + + $ hadoop jar avro-tools-1.8.2.jar tojson \ + hdfs:///topics/test_hdfs/partition=0/test_hdfs+0+0000000000+0000000002.avro + +where "" is the HDFS name node hostname. - $ hadoop jar avro-tools-1.7.7.jar tojson \ - /topics/test_hdfs/partition=0/test_hdfs+0+0000000000+0000000002.avro +or, if you experience issues, first copy the avro file from HDFS to the local filesystem and try again with java: -or, if you experience issues, first copy the avro file from HDFS to the local filesystem and try again with java:: +.. sourcecode:: bash $ hadoop fs -copyToLocal /topics/test_hdfs/partition=0/test_hdfs+0+0000000000+0000000002.avro \ /tmp/test_hdfs+0+0000000000+0000000002.avro - $ java -jar avro-tools-1.7.7.jar tojson /tmp/test_hdfs+0+0000000000+0000000002.avro + $ java -jar avro-tools-1.8.2.jar tojson /tmp/test_hdfs+0+0000000000+0000000002.avro -You should see the following output:: +You should see the following output: + +.. sourcecode:: bash {"f1":"value1"} {"f1":"value2"} {"f1":"value3"} +Finally, stop the Connect worker as well as all the rest of the Confluent services by running: + +.. sourcecode:: bash + + $ confluent stop + Stopping connect + connect is [DOWN] + Stopping kafka-rest + kafka-rest is [DOWN] + Stopping schema-registry + schema-registry is [DOWN] + Stopping kafka + kafka is [DOWN] + Stopping zookeeper + zookeeper is [DOWN] + +or stop all the services and additionally wipe out any data generated during this quickstart by running: + +.. sourcecode:: bash + + $ confluent destroy + Stopping connect + connect is [DOWN] + Stopping kafka-rest + kafka-rest is [DOWN] + Stopping schema-registry + schema-registry is [DOWN] + Stopping kafka + kafka is [DOWN] + Stopping zookeeper + zookeeper is [DOWN] + Deleting: /tmp/confluent.w1CpYsaI .. note:: If you want to run the Quickstart with Hive integration, before starting the connector, you need to add the following configurations to @@ -144,7 +232,9 @@ description of the available configuration options. Example ~~~~~~~ -Here is the content of ``etc/kafka-connect-hdfs/quickstart-hdfs.properties``:: +Here is the content of ``etc/kafka-connect-hdfs/quickstart-hdfs.properties``: + +.. sourcecode:: bash name=hdfs-sink connector.class=io.confluent.connect.hdfs.HdfsSinkConnector @@ -166,18 +256,22 @@ Format and Partitioner ~~~~~~~~~~~~~~~~~~~~~~ You need to specify the ``format.class`` and ``partitioner.class`` if you want to write other formats to HDFS or use other partitioners. The following example configurations demonstrates how to -write Parquet format and use hourly partitioner:: +write Parquet format and use hourly partitioner: + +.. sourcecode:: bash format.class=io.confluent.connect.hdfs.parquet.ParquetFormat partitioner.class=io.confluent.connect.hdfs.partitioner.HourlyPartitioner -.. note:: If you want ot use the field partitioner, you need to specify the ``partition.field.name`` +.. note:: If you want to use the field partitioner, you need to specify the ``partition.field.name`` configuration as well to specify the field name of the record. Hive Integration ~~~~~~~~~~~~~~~~ At minimum, you need to specify ``hive.integration``, ``hive.metastore.uris`` and -``schema.compatibility`` when integrating Hive. 
Here is an example configuration:: +``schema.compatibility`` when integrating Hive. Here is an example configuration: + +.. sourcecode:: bash hive.integration=true hive.metastore.uris=thrift://localhost:9083 # FQDN for the host part @@ -203,7 +297,9 @@ latest Hive table schema. Please find more information on schema compatibility i Secure HDFS and Hive Metastore ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To work with secure HDFS and Hive metastore, you need to specify ``hdfs.authentication.kerberos``, -``connect.hdfs.principal``, ``connect.keytab``, ``hdfs.namenode.principal``:: +``connect.hdfs.principal``, ``connect.keytab``, ``hdfs.namenode.principal``: + +.. sourcecode:: bash hdfs.authentication.kerberos=true connect.hdfs.principal=connect-hdfs/_HOST@YOUR-REALM.COM diff --git a/licenses/LICENSE.gpl2.txt b/licenses/LICENSE.gpl2.txt deleted file mode 100644 index d159169..0000000 --- a/licenses/LICENSE.gpl2.txt +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. 
We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. 
If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. 
-However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. 
Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. diff --git a/pom.xml b/pom.xml index fa48444..db8396a 100644 --- a/pom.xml +++ b/pom.xml @@ -50,15 +50,15 @@ - 3.1.2 - 0.10.1.1-cp1 + 3.3.0 + 0.11.0.0 4.12 - 2.7.2 + 2.7.3 1.2.1 - 1.7.7 + 1.8.2 1.7.0 2.4 - 1.6.2 + 2.9.7 UTF-8 http://packages.confluent.io/maven/ 2.6.3 @@ -82,6 +82,7 @@ org.apache.kafka connect-api ${kafka.version} + provided io.confluent @@ -203,7 +204,7 @@ com.google.guava guava - 19.0 + 20.0 com.google.cloud.bigdataoss @@ -267,8 +268,9 @@ maven-surefire-plugin 2.18.1 - -Djava.awt.headless=true - pertest + -Djava.awt.headless=true -XX:MaxPermSize=512m + false + 1 diff --git a/src/main/java/com/qubole/streamx/format/csv/CsvParquetFormat.java b/src/main/java/com/qubole/streamx/format/csv/CsvParquetFormat.java new file mode 100644 index 0000000..5824803 --- /dev/null +++ b/src/main/java/com/qubole/streamx/format/csv/CsvParquetFormat.java @@ -0,0 +1,35 @@ +package com.qubole.streamx.format.csv; + +import io.confluent.connect.avro.AvroData; +import io.confluent.connect.hdfs.Format; +import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; +import io.confluent.connect.hdfs.RecordWriterProvider; +import io.confluent.connect.hdfs.SchemaFileReader; +import io.confluent.connect.hdfs.hive.HiveMetaStore; +import io.confluent.connect.hdfs.hive.HiveUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Takes CSV data from a {@link org.apache.kafka.connect.storage.StringConverter} and converts it to Parquet format + * prior to S3 upload. 
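+ * <p>Note that {@code getSchemaFileReader} and {@code getHiveUtil} below simply return {@code null}, so schema file reads and Hive integration are not implemented for this format; only record writing via {@link CsvParquetRecordWriterProvider} is provided.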
+ */ +public class CsvParquetFormat implements Format { + + private static final Logger log = LoggerFactory.getLogger(CsvParquetFormat.class); + + @Override + public RecordWriterProvider getRecordWriterProvider() { + return new CsvParquetRecordWriterProvider(); + } + + @Override + public SchemaFileReader getSchemaFileReader(AvroData avroData) { + return null; + } + + @Override + public HiveUtil getHiveUtil(HdfsSinkConnectorConfig config, AvroData avroData, HiveMetaStore hiveMetaStore) { + return null; + } +} diff --git a/src/main/java/com/qubole/streamx/format/csv/CsvParquetRecordWriterProvider.java b/src/main/java/com/qubole/streamx/format/csv/CsvParquetRecordWriterProvider.java new file mode 100644 index 0000000..270d4e2 --- /dev/null +++ b/src/main/java/com/qubole/streamx/format/csv/CsvParquetRecordWriterProvider.java @@ -0,0 +1,163 @@ +package com.qubole.streamx.format.csv; + +import com.qubole.streamx.s3.S3SinkConnector; +import com.qubole.streamx.s3.S3SinkConnectorConfig; +import io.confluent.connect.avro.AvroData; +import io.confluent.connect.hdfs.RecordWriter; +import io.confluent.connect.hdfs.RecordWriterProvider; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.kafka.connect.sink.SinkRecord; +import org.apache.parquet.column.ColumnDescriptor; +import org.apache.parquet.hadoop.ParquetOutputFormat; +import org.apache.parquet.hadoop.api.WriteSupport; +import org.apache.parquet.hadoop.metadata.CompressionCodecName; +import org.apache.parquet.io.api.Binary; +import org.apache.parquet.io.api.RecordConsumer; +import org.apache.parquet.schema.MessageType; +import org.apache.parquet.schema.MessageTypeParser; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; + +/** + * Needs to be used in conjunction with the {@link org.apache.kafka.connect.storage.StringConverter}, which is set via the + * config/connect-*.properties key.converter and value.converter properties. 
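+ * <p>The target Parquet schema is read from the {@code s3.parquet.schema} connector property and parsed with Parquet's {@code MessageTypeParser}; each record value is written as a single comma-separated line whose fields follow the schema's column order. Output files are Snappy-compressed, hence the {@code .snappy.parquet} extension.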
+ */ +public class CsvParquetRecordWriterProvider implements RecordWriterProvider { + + private static final Logger log = LoggerFactory.getLogger(CsvParquetRecordWriterProvider.class); + + @Override + public String getExtension() { + return ".snappy.parquet"; + } + + @Override + public RecordWriter getRecordWriter(Configuration conf, String fileName, SinkRecord record, AvroData avroData) throws IOException { + Path path = new Path(fileName); + + String schemaString = S3SinkConnector.getConfigString(S3SinkConnectorConfig.PARQUET_SCHEMA_CONFIG); + if (schemaString == null) { + throw new IllegalArgumentException(String.format("A Parquet schema must be specified under property %s!", + S3SinkConnectorConfig.PARQUET_SCHEMA_CONFIG)); + } + + MessageType schema = MessageTypeParser.parseMessageType(schemaString); + log.debug("Schema String = {}", schema.toString()); + + final SourceParquetOutputFormat sourceParquetOutputFormat = new SourceParquetOutputFormat(schema); + final org.apache.hadoop.mapreduce.RecordWriter recordWriter; + try { + recordWriter = sourceParquetOutputFormat.getRecordWriter(conf, path, CompressionCodecName.SNAPPY); + } catch (InterruptedException ie) { + throw new IOException(ie); + } + return new RecordWriter() { + @Override + public void write(SinkRecord sinkRecord) throws IOException { + try { + log.trace("SinkRecord = {}", sinkRecord.value()); + recordWriter.write(null, sinkRecord.value().toString()); + } catch (InterruptedException ie) { + throw new IOException(ie); + } + } + + @Override + public void close() throws IOException { + try { + recordWriter.close(null); + } catch (InterruptedException ie) { + throw new IOException(ie); + } + } + }; + } + + /** + * Used to parse and write each CSV record to the Parquet {@link RecordConsumer}. + */ + private static final class CsvParquetWriteSupport extends WriteSupport { + + private static final Logger log = LoggerFactory.getLogger(CsvParquetWriteSupport.class); + + private final MessageType schema; + + private RecordConsumer recordConsumer; + + private List columns; + + // TODO: specify the csv splitter + public CsvParquetWriteSupport(MessageType schema) { + this.schema = schema; + this.columns = schema.getColumns(); + } + + @Override + public WriteContext init(Configuration configuration) { + return new WriteContext(schema, new HashMap()); + } + + @Override + public void prepareForWrite(RecordConsumer recordConsumer) { + this.recordConsumer = recordConsumer; + } + + @Override + public void write(String record) { + String[] csvRecords = record.split(","); + + if (csvRecords.length > 0) { + recordConsumer.startMessage(); + + for (int i = 0; i < columns.size(); i++) { + ColumnDescriptor columnDescriptor = columns.get(i); + + // If there aren't enough entries in the csvRecords, write an empty string + String csvRecord = (csvRecords.length < i) ? 
"" : csvRecords[i]; + + recordConsumer.startField(columns.get(i).getPath()[0], i); + switch (columnDescriptor.getType()) { + case INT32: + recordConsumer.addInteger(Integer.parseInt(csvRecord)); + break; + case INT64: + case INT96: + recordConsumer.addLong(Long.parseLong(csvRecord)); + break; + case BINARY: + case FIXED_LEN_BYTE_ARRAY: + recordConsumer.addBinary(Binary.fromString(csvRecord)); + break; + case BOOLEAN: + recordConsumer.addBoolean(Boolean.parseBoolean(csvRecord)); + break; + case FLOAT: + recordConsumer.addFloat(Float.parseFloat(csvRecord)); + break; + case DOUBLE: + recordConsumer.addDouble(Double.parseDouble(csvRecord)); + break; + default: + throw new UnsupportedOperationException( + String.format("Unsupported record conversion for type '%s'!", columnDescriptor.getType().name())); + } + + recordConsumer.endField(columns.get(i).getPath()[0], i); + } + recordConsumer.endMessage(); + } + } + } + + private static final class SourceParquetOutputFormat extends ParquetOutputFormat { + private SourceParquetOutputFormat(MessageType schema) { + super(new CsvParquetWriteSupport(schema)); + } + } + +} diff --git a/src/main/java/com/qubole/streamx/s3/S3SinkConnector.java b/src/main/java/com/qubole/streamx/s3/S3SinkConnector.java index c7c642b..8328076 100644 --- a/src/main/java/com/qubole/streamx/s3/S3SinkConnector.java +++ b/src/main/java/com/qubole/streamx/s3/S3SinkConnector.java @@ -37,7 +37,7 @@ public class S3SinkConnector extends Connector { private static final Logger log = LoggerFactory.getLogger(S3SinkConnector.class); private Map configProperties; - private S3SinkConnectorConfig config; + private static S3SinkConnectorConfig config; @Override public String version() { @@ -78,4 +78,8 @@ public void stop() throws ConnectException { public ConfigDef config() { return S3SinkConnectorConfig.getConfig(); } + + public static String getConfigString(String key) { + return config.getString(key); + } } diff --git a/src/main/java/com/qubole/streamx/s3/S3SinkConnectorConfig.java b/src/main/java/com/qubole/streamx/s3/S3SinkConnectorConfig.java index 8e5aac6..17f8caa 100644 --- a/src/main/java/com/qubole/streamx/s3/S3SinkConnectorConfig.java +++ b/src/main/java/com/qubole/streamx/s3/S3SinkConnectorConfig.java @@ -58,6 +58,9 @@ public class S3SinkConnectorConfig extends HdfsSinkConnectorConfig { public static final String NAME_DEFAULT = ""; private static final String NAME_DISPLAY = "Connector Name"; + public static final String PARQUET_SCHEMA_CONFIG = "s3.parquet.schema"; + private static final String PARQUET_SCHEMA_DOC = "The schema used to write a Parquet file"; + private static final String PARQUET_SCHEMA_DISPLAY = "Parquet Schema"; static { config.define(S3_URL_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, S3_URL_DOC, S3_GROUP, 1, ConfigDef.Width.MEDIUM, S3_URL_DISPLAY); @@ -66,6 +69,7 @@ public class S3SinkConnectorConfig extends HdfsSinkConnectorConfig { config.define(DB_USER_CONFIG, ConfigDef.Type.STRING, DB_USER_DEFAULT, ConfigDef.Importance.LOW, DB_USER_DOC, WAL_GROUP, 1, ConfigDef.Width.MEDIUM, DB_USER_DISPLAY); config.define(DB_PASSWORD_CONFIG, ConfigDef.Type.STRING, DB_PASSWORD_DEFAULT, ConfigDef.Importance.LOW, DB_PASSWORD_DOC, WAL_GROUP, 1, ConfigDef.Width.MEDIUM, DB_PASSWORD_DISPLAY); config.define(NAME_CONFIG, ConfigDef.Type.STRING, NAME_DEFAULT, ConfigDef.Importance.HIGH, NAME_DOC, S3_GROUP,1, ConfigDef.Width.MEDIUM, NAME_DISPLAY); + config.define(PARQUET_SCHEMA_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, PARQUET_SCHEMA_DOC, S3_GROUP, 1, 
ConfigDef.Width.LONG, PARQUET_SCHEMA_DISPLAY); } diff --git a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnector.java b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnector.java index 8b365f0..909928b 100644 --- a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnector.java +++ b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnector.java @@ -17,9 +17,9 @@ import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.connect.connector.Connector; import org.apache.kafka.connect.connector.Task; import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.sink.SinkConnector; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,7 +31,7 @@ /** * HdfsSinkConnector is a Kafka Connect Connector implementation that ingest data from Kafka to HDFS. */ -public class HdfsSinkConnector extends Connector { +public class HdfsSinkConnector extends SinkConnector { private static final Logger log = LoggerFactory.getLogger(HdfsSinkConnector.class); private Map configProperties; diff --git a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java index 4914440..f06a7df 100644 --- a/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java +++ b/src/main/java/io/confluent/connect/hdfs/HdfsSinkConnectorConfig.java @@ -14,10 +14,9 @@ package io.confluent.connect.hdfs; - import com.qubole.streamx.s3.S3Storage; import org.apache.hadoop.conf.Configuration; -import org.apache.kafka.clients.producer.internals.DefaultPartitioner; +// import org.apache.kafka.clients.producer.internals.DefaultPartitioner; import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; @@ -31,6 +30,7 @@ import java.util.Map; import io.confluent.connect.hdfs.partitioner.DailyPartitioner; +import io.confluent.connect.hdfs.partitioner.DefaultPartitioner; import io.confluent.connect.hdfs.partitioner.FieldPartitioner; import io.confluent.connect.hdfs.partitioner.HourlyPartitioner; import io.confluent.connect.hdfs.partitioner.Partitioner; @@ -414,6 +414,6 @@ public HdfsSinkConnectorConfig(Map props) { } public static void main(String[] args) { - System.out.println(config.toRst()); + System.out.println(config.toEnrichedRst()); } } diff --git a/src/main/java/io/confluent/connect/hdfs/avro/AvroHiveUtil.java b/src/main/java/io/confluent/connect/hdfs/avro/AvroHiveUtil.java index 218bb5a..85d9de8 100644 --- a/src/main/java/io/confluent/connect/hdfs/avro/AvroHiveUtil.java +++ b/src/main/java/io/confluent/connect/hdfs/avro/AvroHiveUtil.java @@ -60,7 +60,7 @@ public void alterSchema(String database, String tableName, Schema schema) throws private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException { - Table table = new Table(database, tableName); + Table table = newTable(database, tableName); table.setTableType(TableType.EXTERNAL_TABLE); table.getParameters().put("EXTERNAL", "TRUE"); String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName); diff --git a/src/main/java/io/confluent/connect/hdfs/avro/AvroRecordWriterProvider.java b/src/main/java/io/confluent/connect/hdfs/avro/AvroRecordWriterProvider.java index f637f4b..bbf89b3 100644 --- a/src/main/java/io/confluent/connect/hdfs/avro/AvroRecordWriterProvider.java +++ 
b/src/main/java/io/confluent/connect/hdfs/avro/AvroRecordWriterProvider.java @@ -16,6 +16,7 @@ package io.confluent.connect.hdfs.avro; +import io.confluent.kafka.serializers.NonRecordContainer; import org.apache.avro.file.DataFileWriter; import org.apache.avro.generic.GenericDatumWriter; import org.apache.avro.io.DatumWriter; @@ -61,7 +62,12 @@ public RecordWriter getRecordWriter(Configuration conf, final String public void write(SinkRecord record) throws IOException { log.trace("Sink record: {}", record.toString()); Object value = avroData.fromConnectData(schema, record.value()); - writer.append(value); + // AvroData wraps primitive types so their schema can be included. We need to unwrap NonRecordContainers to just + // their value to properly handle these types + if (value instanceof NonRecordContainer) + writer.append(((NonRecordContainer) value).getValue()); + else + writer.append(value); } @Override diff --git a/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java b/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java index 385278e..dca869a 100644 --- a/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java +++ b/src/main/java/io/confluent/connect/hdfs/hive/HiveMetaStore.java @@ -94,7 +94,7 @@ public Void call() throws TException { // purposely don't check if the partition already exists because // getPartition(db, table, path) will throw an exception to indicate the // partition doesn't exist also. this way, it's only one call. - client.appendPartition(database, tableName, path); + client.appendPartition(database, tableNameConverter(tableName), path); return null; } }; @@ -104,7 +104,7 @@ public Void call() throws TException { } catch (AlreadyExistsException e) { // this is okay } catch (InvalidObjectException e) { - throw new HiveMetaStoreException("Invalid partition for " + database + "." + tableName + ": " + path, e); + throw new HiveMetaStoreException("Invalid partition for " + database + "." + tableNameConverter(tableName) + ": " + path, e); } catch (MetaException e) { throw new HiveMetaStoreException("Hive MetaStore exception", e); } catch (TException e) { @@ -116,7 +116,7 @@ public void dropPartition(final String database, final String tableName, final S ClientAction dropPartition = new ClientAction() { @Override public Void call() throws TException { - client.dropPartition(database, tableName, path, false); + client.dropPartition(database, tableNameConverter(tableName), path, false); return null; } }; @@ -126,7 +126,7 @@ public Void call() throws TException { } catch (NoSuchObjectException e) { // this is okay } catch (InvalidObjectException e) { - throw new HiveMetaStoreException("Invalid partition for " + database + "." + tableName + ": " + path, e); + throw new HiveMetaStoreException("Invalid partition for " + database + "." + tableNameConverter(tableName) + ": " + path, e); } catch (MetaException e) { throw new HiveMetaStoreException("Hive MetaStore exception", e); } catch (TException e) { @@ -192,7 +192,7 @@ public Void call() throws TException { try { doAction(create); } catch (NoSuchObjectException e) { - throw new HiveMetaStoreException("Hive table not found: " + table.getDbName() + "." + table.getTableName()); + throw new HiveMetaStoreException("Hive table not found: " + table.getDbName() + "." 
+ tableNameConverter(table.getTableName())); } catch (AlreadyExistsException e) { // this is okey log.warn("Hive table already exists: {}.{}", table.getDbName(), table.getTableName()); @@ -209,7 +209,7 @@ public void alterTable(final Table table) throws HiveMetaStoreException { ClientAction alter = new ClientAction() { @Override public Void call() throws TException { - client.alter_table(table.getDbName(), table.getTableName(), table.getTTable()); + client.alter_table(table.getDbName(), tableNameConverter(table.getTableName()), table.getTTable()); return null; } }; @@ -233,7 +233,7 @@ public void dropTable(final String database, final String tableName) { ClientAction drop = new ClientAction() { @Override public Void call() throws TException { - client.dropTable(database, tableName, false, true); + client.dropTable(database, tableNameConverter(tableName), false, true); return null; } }; @@ -253,7 +253,7 @@ public boolean tableExists(final String database, final String tableName) throws ClientAction exists = new ClientAction() { @Override public Boolean call() throws TException { - return client.tableExists(database, tableName); + return client.tableExists(database, tableNameConverter(tableName)); } }; try { @@ -271,7 +271,7 @@ public Table getTable(final String database, final String tableName) throws Hive ClientAction getTable = new ClientAction
() { @Override public Table call() throws TException { - return new Table(client.getTable(database, tableName)); + return new Table(client.getTable(database, tableNameConverter(tableName))); } }; @@ -279,7 +279,7 @@ public Table call() throws TException { try { table = doAction(getTable); } catch (NoSuchObjectException e) { - throw new HiveMetaStoreException("Hive table not found: " + database + "." + tableName); + throw new HiveMetaStoreException("Hive table not found: " + database + "." + tableNameConverter(tableName)); } catch (MetaException e) { throw new HiveMetaStoreException("Hive table lookup exception", e); } catch (TException e) { @@ -287,7 +287,7 @@ public Table call() throws TException { } if (table == null) { - throw new HiveMetaStoreException("Could not find info for table: " + tableName); + throw new HiveMetaStoreException("Could not find info for table: " + tableNameConverter(tableName)); } return table; } @@ -296,7 +296,7 @@ public List listPartitions(final String database, final String tableName ClientAction> listPartitions = new ClientAction>() { @Override public List call() throws TException { - List partitions = client.listPartitions(database, tableName, max); + List partitions = client.listPartitions(database, tableNameConverter(tableName), max); List paths = new ArrayList<>(); for (Partition partition : partitions) { paths.add(partition.getSd().getLocation()); @@ -354,4 +354,8 @@ public List call() throws TException { throw new HiveMetaStoreException("Exception communicating with the Hive MetaStore", e); } } + + public String tableNameConverter(String table){ + return table == null ? table : table.replaceAll("\\.", "_"); + } } diff --git a/src/main/java/io/confluent/connect/hdfs/hive/HiveUtil.java b/src/main/java/io/confluent/connect/hdfs/hive/HiveUtil.java index c466b04..a1011fc 100644 --- a/src/main/java/io/confluent/connect/hdfs/hive/HiveUtil.java +++ b/src/main/java/io/confluent/connect/hdfs/hive/HiveUtil.java @@ -15,6 +15,7 @@ package io.confluent.connect.hdfs.hive; import com.qubole.streamx.s3.S3SinkConnectorConfig; +import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.kafka.connect.data.Schema; import io.confluent.connect.avro.AvroData; @@ -39,4 +40,8 @@ public HiveUtil(HdfsSinkConnectorConfig connectorConfig, AvroData avroData, Hive public abstract void createTable(String database, String tableName, Schema schema, Partitioner partitioner); public abstract void alterSchema(String database, String tableName, Schema schema); + + public Table newTable(String database, String table){ + return new Table(database, hiveMetaStore.tableNameConverter(table)); + } } diff --git a/src/main/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtil.java b/src/main/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtil.java index 0664b8c..9fa6aaa 100644 --- a/src/main/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtil.java +++ b/src/main/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtil.java @@ -55,7 +55,7 @@ public void alterSchema(String database, String tableName, Schema schema) { } private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException { - Table table = new Table(database, tableName); + Table table = newTable(database, tableName); table.setTableType(TableType.EXTERNAL_TABLE); table.getParameters().put("EXTERNAL", "TRUE"); String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName); diff --git 
a/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java b/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java index d2c0e0e..835a0b8 100644 --- a/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java +++ b/src/test/java/io/confluent/connect/hdfs/HdfsSinkConnectorTestBase.java @@ -25,8 +25,10 @@ import org.junit.After; import org.junit.Before; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; @@ -44,15 +46,16 @@ public class HdfsSinkConnectorTestBase { protected MockSinkTaskContext context; protected static final String TOPIC = "topic"; + protected static final String TOPIC_WITH_DOTS = "topic.with.dots"; protected static final int PARTITION = 12; protected static final int PARTITION2 = 13; protected static final int PARTITION3 = 14; protected static final TopicPartition TOPIC_PARTITION = new TopicPartition(TOPIC, PARTITION); protected static final TopicPartition TOPIC_PARTITION2 = new TopicPartition(TOPIC, PARTITION2); protected static final TopicPartition TOPIC_PARTITION3 = new TopicPartition(TOPIC, PARTITION3); + protected static final TopicPartition TOPIC_WITH_DOTS_PARTITION = new TopicPartition(TOPIC_WITH_DOTS, PARTITION); protected static Set assignment; - protected Map createProps() { Map props = new HashMap<>(); props.put(HdfsSinkConnectorConfig.HDFS_URL_CONFIG, url); @@ -70,13 +73,17 @@ protected Schema createSchema() { .build(); } - protected Struct createRecord(Schema schema) { + protected Struct createRecord(Schema schema, int ibase, float fbase) { return new Struct(schema) .put("boolean", true) - .put("int", 12) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); + .put("int", ibase) + .put("long", (long) ibase) + .put("float", fbase) + .put("double", (double) fbase); + } + + protected Struct createRecord(Schema schema) { + return createRecord(schema, 12, 12.2f); } protected Schema createNewSchema() { @@ -100,6 +107,28 @@ protected Struct createNewRecord(Schema newSchema) { .put("string", "def"); } + // Create a batch of records with incremental numeric field values. Total number of records is + // given by 'size'. + protected List createRecordBatch(Schema schema, int size) { + ArrayList records = new ArrayList<>(size); + int ibase = 16; + float fbase = 12.2f; + + for (int i = 0; i < size; ++i) { + records.add(createRecord(schema, ibase + i, fbase + i)); + } + return records; + } + + // Create a list of records by repeating the same record batch. Total number of records: 'batchesNum' x 'batchSize' + protected List createRecordBatches(Schema schema, int batchSize, int batchesNum) { + ArrayList records = new ArrayList<>(); + for (int i = 0; i < batchesNum; ++i) { + records.addAll(createRecordBatch(schema, batchSize)); + } + return records; + } + @Before public void setUp() throws Exception { conf = new Configuration(); @@ -181,13 +210,12 @@ public Set assignment() { } @Override - public void pause(TopicPartition... partitions) { - return; - } + public void pause(TopicPartition... partitions) {} @Override - public void resume(TopicPartition... partitions) { - return; - } + public void resume(TopicPartition... 
partitions) {} + + @Override + public void requestCommit() {} } } diff --git a/src/test/java/io/confluent/connect/hdfs/HdfsSinkTaskTest.java b/src/test/java/io/confluent/connect/hdfs/HdfsSinkTaskTest.java index 1e75d10..0ec47e2 100644 --- a/src/test/java/io/confluent/connect/hdfs/HdfsSinkTaskTest.java +++ b/src/test/java/io/confluent/connect/hdfs/HdfsSinkTaskTest.java @@ -14,6 +14,7 @@ package io.confluent.connect.hdfs; +import io.confluent.kafka.serializers.NonRecordContainer; import org.apache.hadoop.fs.Path; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.connect.data.Schema; @@ -179,6 +180,49 @@ public void testSinkTaskPut() throws Exception { } } + @Test + public void testSinkTaskPutPrimitive() throws Exception { + Map props = createProps(); + HdfsSinkTask task = new HdfsSinkTask(); + + final String key = "key"; + final Schema schema = Schema.INT32_SCHEMA; + final int record = 12; + Collection sinkRecords = new ArrayList<>(); + for (TopicPartition tp: assignment) { + for (long offset = 0; offset < 7; offset++) { + SinkRecord sinkRecord = + new SinkRecord(tp.topic(), tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset); + sinkRecords.add(sinkRecord); + } + } + task.initialize(context); + task.start(props); + task.put(sinkRecords); + task.stop(); + + AvroData avroData = task.getAvroData(); + // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close + long[] validOffsets = {-1, 2, 5}; + + for (TopicPartition tp : assignment) { + String directory = tp.topic() + "/" + "partition=" + String.valueOf(tp.partition()); + for (int j = 1; j < validOffsets.length; ++j) { + long startOffset = validOffsets[j - 1] + 1; + long endOffset = validOffsets[j]; + Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory, tp, + startOffset, endOffset, extension, + ZERO_PAD_FMT)); + Collection records = schemaFileReader.readData(conf, path); + long size = endOffset - startOffset + 1; + assertEquals(records.size(), size); + for (Object avroRecord : records) { + assertEquals(avroRecord, record); + } + } + } + } + private void createCommittedFiles() throws IOException { String file1 = FileUtils.committedFileName(url, topicsDir, DIRECTORY1, TOPIC_PARTITION, 0, 10, extension, ZERO_PAD_FMT); diff --git a/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java b/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java index 6e29a4f..ffa26fb 100644 --- a/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java +++ b/src/test/java/io/confluent/connect/hdfs/TestWithMiniDFSCluster.java @@ -17,18 +17,44 @@ package io.confluent.connect.hdfs; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.apache.kafka.connect.data.SchemaProjector; +import org.apache.kafka.connect.data.Struct; +import org.apache.kafka.connect.sink.SinkRecord; import org.junit.After; import org.junit.Before; +import java.io.FileNotFoundException; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.Map; +import java.util.Set; + +import io.confluent.connect.hdfs.filter.TopicPartitionCommittedFileFilter; +import 
io.confluent.connect.hdfs.partitioner.Partitioner; + +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; public class TestWithMiniDFSCluster extends HdfsSinkConnectorTestBase { protected MiniDFSCluster cluster; protected FileSystem fs; + protected SchemaFileReader schemaFileReader; + protected Partitioner partitioner; + protected String extension; + // The default based on default configuration of 10 + protected String zeroPadFormat = "%010d"; @Before public void setUp() throws Exception { @@ -69,4 +95,235 @@ protected Map createProps() { props.put(HdfsSinkConnectorConfig.HDFS_URL_CONFIG, url); return props; } + + /** + * Return a list of new records starting at zero offset. + * + * @param size the number of records to return. + * @return the list of records. + */ + protected List createSinkRecords(int size) { + return createSinkRecords(size, 0); + } + + /** + * Return a list of new records starting at the given offset. + * + * @param size the number of records to return. + * @param startOffset the starting offset. + * @return the list of records. + */ + protected List createSinkRecords(int size, long startOffset) { + return createSinkRecords(size, startOffset, Collections.singleton(new TopicPartition(TOPIC, PARTITION))); + } + + /** + * Return a list of new records for a set of partitions, starting at the given offset in each partition. + * + * @param size the number of records to return. + * @param startOffset the starting offset. + * @param partitions the set of partitions to create records for. + * @return the list of records. + */ + protected List createSinkRecords(int size, long startOffset, Set partitions) { + Schema schema = createSchema(); + Struct record = createRecord(schema); + List same = new ArrayList<>(); + for (int i = 0; i < size; ++i) { + same.add(record); + } + return createSinkRecords(same, schema, startOffset, partitions); + } + + protected List createSinkRecords(List records, Schema schema) { + return createSinkRecords(records, schema, 0, Collections.singleton(new TopicPartition(TOPIC, PARTITION))); + } + + protected List createSinkRecords(List records, Schema schema, long startOffset, + Set partitions) { + String key = "key"; + List sinkRecords = new ArrayList<>(); + for (TopicPartition tp : partitions) { + long offset = startOffset; + for (Struct record : records) { + sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset++)); + } + } + return sinkRecords; + } + + protected List createSinkRecordsNoVersion(int size, long startOffset) { + String key = "key"; + Schema schemaNoVersion = SchemaBuilder.struct().name("record") + .field("boolean", Schema.BOOLEAN_SCHEMA) + .field("int", Schema.INT32_SCHEMA) + .field("long", Schema.INT64_SCHEMA) + .field("float", Schema.FLOAT32_SCHEMA) + .field("double", Schema.FLOAT64_SCHEMA) + .build(); + + Struct recordNoVersion = new Struct(schemaNoVersion); + recordNoVersion.put("boolean", true) + .put("int", 12) + .put("long", 12L) + .put("float", 12.2f) + .put("double", 12.2); + + List sinkRecords = new ArrayList<>(); + for (long offset = startOffset; offset < startOffset + size; ++offset) { + sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schemaNoVersion, + recordNoVersion, offset)); + } + return sinkRecords; + } + + protected List createSinkRecordsWithAlternatingSchemas(int size, long startOffset) { + String key = "key"; + Schema schema = createSchema(); + Struct record = 
createRecord(schema); + Schema newSchema = createNewSchema(); + Struct newRecord = createNewRecord(newSchema); + + int limit = (size / 2) * 2; + boolean remainder = size % 2 > 0; + List sinkRecords = new ArrayList<>(); + for (long offset = startOffset; offset < startOffset + limit; ++offset) { + sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset)); + sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, ++offset)); + } + if (remainder) { + sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, + startOffset + size - 1)); + } + return sinkRecords; + } + + protected List createSinkRecordsInterleaved(int size, long startOffset, Set partitions) { + String key = "key"; + Schema schema = createSchema(); + Struct record = createRecord(schema); + + List sinkRecords = new ArrayList<>(); + for (long offset = startOffset, total = 0; total < size; ++offset) { + for (TopicPartition tp : partitions) { + sinkRecords.add(new SinkRecord(TOPIC, tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset)); + if (++total >= size) { + break; + } + } + } + return sinkRecords; + } + + protected String getDirectory() { + return getDirectory(TOPIC, PARTITION); + } + + protected String getDirectory(String topic, int partition) { + String encodedPartition = "partition=" + String.valueOf(partition); + return partitioner.generatePartitionedPath(topic, encodedPartition); + } + + /** + * Verify files and records are uploaded appropriately. + * + * @param sinkRecords a flat list of the records that need to appear in potentially several files + * in HDFS. + * @param validOffsets an array containing the offsets that map to uploaded files for a + * topic-partition. Offsets appear in ascending order, the difference between two consecutive + * offsets equals the expected size of the file, and last offset is exclusive. + */ + protected void verify(List sinkRecords, long[] validOffsets) throws IOException { + verify(sinkRecords, validOffsets, Collections.singleton(new TopicPartition(TOPIC, PARTITION)), false); + } + + protected void verify(List sinkRecords, long[] validOffsets, Set partitions) + throws IOException { + verify(sinkRecords, validOffsets, partitions, false); + } + + /** + * Verify files and records are uploaded appropriately. + * + * @param sinkRecords a flat list of the records that need to appear in potentially several * + * files in HDFS. + * @param validOffsets an array containing the offsets that map to uploaded files for a + * topic-partition. Offsets appear in ascending order, the difference between two consecutive + * offsets equals the expected size of the file, and last offset is exclusive. + * @param partitions the set of partitions to verify records for. 
+ */ + protected void verify(List sinkRecords, long[] validOffsets, Set partitions, + boolean skipFileListing) throws IOException { + if (!skipFileListing) { + verifyFileListing(validOffsets, partitions); + } + + for (TopicPartition tp : partitions) { + for (int i = 1, j = 0; i < validOffsets.length; ++i) { + long startOffset = validOffsets[i - 1]; + long endOffset = validOffsets[i] - 1; + + String filename = FileUtils.committedFileName(url, topicsDir, getDirectory(tp.topic(), tp.partition()), tp, + startOffset, endOffset, extension, zeroPadFormat); + Path path = new Path(filename); + Collection records = schemaFileReader.readData(conf, path); + + long size = endOffset - startOffset + 1; + assertEquals(size, records.size()); + verifyContents(sinkRecords, j, records); + j += size; + } + } + } + + protected List getExpectedFiles(long[] validOffsets, TopicPartition tp) { + List expectedFiles = new ArrayList<>(); + for (int i = 1; i < validOffsets.length; ++i) { + long startOffset = validOffsets[i - 1]; + long endOffset = validOffsets[i] - 1; + expectedFiles.add(FileUtils.committedFileName(url, topicsDir, getDirectory(tp.topic(), tp.partition()), tp, + startOffset, endOffset, extension, zeroPadFormat)); + } + return expectedFiles; + } + + protected void verifyFileListing(long[] validOffsets, Set partitions) throws IOException { + for (TopicPartition tp : partitions) { + verifyFileListing(getExpectedFiles(validOffsets, tp), tp); + } + } + + protected void verifyFileListing(List expectedFiles, TopicPartition tp) throws IOException { + FileStatus[] statuses = {}; + try { + statuses = fs.listStatus( + new Path(FileUtils.directoryName(url, topicsDir, getDirectory(tp.topic(), tp.partition()))), + new TopicPartitionCommittedFileFilter(tp)); + } catch (FileNotFoundException e) { + // the directory does not exist. 
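As a worked example of the validOffsets convention these helpers document: the boundaries appear in ascending order, each consecutive pair (boundary, next boundary - 1) is one committed file, and the last boundary is exclusive. With seven records at offsets 0..6 and files of three records (flush_size = 3), validOffsets {0, 3, 6} therefore describes the committed files [0, 2] and [3, 5], while the record at offset 6 never fills a file and is discarded on close. A standalone sketch of that mapping, using no connector classes:

    import java.util.ArrayList;
    import java.util.List;

    public class ValidOffsetsExample {
      public static void main(String[] args) {
        long[] validOffsets = {0, 3, 6};            // ascending boundaries, last one exclusive
        List<long[]> committedRanges = new ArrayList<>();
        for (int i = 1; i < validOffsets.length; ++i) {
          long startOffset = validOffsets[i - 1];   // first offset in the committed file
          long endOffset = validOffsets[i] - 1;     // last offset in the committed file
          committedRanges.add(new long[] {startOffset, endOffset});
        }
        for (long[] range : committedRanges) {
          System.out.println("committed file covers offsets " + range[0] + ".." + range[1]);
        }
        // Prints 0..2 and 3..5; offset 6 belongs to no committed file.
      }
    }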
+ } + + List actualFiles = new ArrayList<>(); + for (FileStatus status : statuses) { + actualFiles.add(status.getPath().toString()); + } + + Collections.sort(actualFiles); + Collections.sort(expectedFiles); + assertThat(actualFiles, is(expectedFiles)); + } + + protected void verifyContents(List expectedRecords, int startIndex, Collection records) { + Schema expectedSchema = null; + for (Object avroRecord : records) { + if (expectedSchema == null) { + expectedSchema = expectedRecords.get(startIndex).valueSchema(); + } + Object expectedValue = SchemaProjector.project(expectedRecords.get(startIndex).valueSchema(), + expectedRecords.get(startIndex++).value(), + expectedSchema); + assertEquals(avroData.fromConnectData(expectedSchema, expectedValue), avroRecord); + } + } + } diff --git a/src/test/java/io/confluent/connect/hdfs/avro/AvroHiveUtilTest.java b/src/test/java/io/confluent/connect/hdfs/avro/AvroHiveUtilTest.java index 52d553b..74322e1 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/AvroHiveUtilTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/AvroHiveUtilTest.java @@ -26,7 +26,6 @@ import org.junit.Test; import java.util.ArrayList; -import java.util.Collection; import java.util.List; import io.confluent.connect.avro.AvroData; @@ -58,14 +57,17 @@ public void testCreateTable() throws Exception { String location = "partition=" + String.valueOf(PARTITION); hiveMetaStore.addPartition(hiveDatabase, TOPIC, location); + Struct expectedRecord = createRecord(schema); + List expectedResult = new ArrayList<>(); List expectedColumnNames = new ArrayList<>(); - for (Field field: schema.fields()) { + for (Field field : schema.fields()) { expectedColumnNames.add(field.name()); + expectedResult.add(String.valueOf(expectedRecord.get(field.name()))); } Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC); List actualColumnNames = new ArrayList<>(); - for (FieldSchema column: table.getSd().getCols()) { + for (FieldSchema column : table.getSd().getCols()) { actualColumnNames.add(column.getName()); } @@ -74,15 +76,15 @@ public void testCreateTable() throws Exception { assertEquals(1, partitionCols.size()); assertEquals("partition", partitionCols.get(0).getName()); - String[] expectedResult = {"true", "12", "12", "12.2", "12.2", "12"}; String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + TOPIC); String[] rows = result.split("\n"); // Only 6 of the 7 records should have been delivered due to flush_size = 3 assertEquals(6, rows.length); - for (String row: rows) { + for (String row : rows) { String[] parts = HiveTestUtils.parseOutput(row); - for (int j = 0; j < expectedResult.length; ++j) { - assertEquals(expectedResult[j], parts[j]); + int j = 0; + for (String expectedValue : expectedResult) { + assertEquals(expectedValue, parts[j++]); } } } @@ -97,14 +99,17 @@ public void testAlterSchema() throws Exception { String location = "partition=" + String.valueOf(PARTITION); hiveMetaStore.addPartition(hiveDatabase, TOPIC, location); + Struct expectedRecord = createRecord(schema); + List expectedResult = new ArrayList<>(); List expectedColumnNames = new ArrayList<>(); - for (Field field: schema.fields()) { + for (Field field : schema.fields()) { expectedColumnNames.add(field.name()); + expectedResult.add(String.valueOf(expectedRecord.get(field.name()))); } Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC); List actualColumnNames = new ArrayList<>(); - for (FieldSchema column: table.getSd().getCols()) { + for (FieldSchema column : table.getSd().getCols()) { 
actualColumnNames.add(column.getName()); } @@ -114,15 +119,15 @@ public void testAlterSchema() throws Exception { hive.alterSchema(hiveDatabase, TOPIC, newSchema); - String[] expectedResult = {"true", "12", "12", "12.2", "12.2", "abc", "12"}; String result = HiveTestUtils.runHive(hiveExec, "SELECT * from " + TOPIC); String[] rows = result.split("\n"); // Only 6 of the 7 records should have been delivered due to flush_size = 3 assertEquals(6, rows.length); - for (String row: rows) { + for (String row : rows) { String[] parts = HiveTestUtils.parseOutput(row); - for (int j = 0; j < expectedResult.length; ++j) { - assertEquals(expectedResult[j], parts[j]); + int j = 0; + for (String expectedValue : expectedResult) { + assertEquals(expectedValue, parts[j++]); } } } @@ -131,16 +136,8 @@ private void prepareData(String topic, int partition) throws Exception { TopicPartition tp = new TopicPartition(topic, partition); DataWriter hdfsWriter = createWriter(context, avroData); hdfsWriter.recover(tp); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - Collection sinkRecords = new ArrayList<>(); - for (long offset = 0; offset < 7; offset++) { - SinkRecord sinkRecord = - new SinkRecord(topic, partition, Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } + List sinkRecords = createSinkRecords(7); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); diff --git a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java index f16aae5..2040abc 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/DataWriterAvroTest.java @@ -14,34 +14,27 @@ package io.confluent.connect.hdfs.avro; -import io.confluent.connect.hdfs.DataWriter; -import io.confluent.connect.hdfs.FileUtils; -import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; -import io.confluent.connect.hdfs.SchemaFileReader; -import io.confluent.connect.hdfs.TestWithMiniDFSCluster; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.connect.data.Schema; -import org.apache.kafka.connect.data.SchemaBuilder; -import org.apache.kafka.connect.data.SchemaProjector; -import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.sink.SinkRecord; +import org.junit.Before; import org.junit.Test; -import io.confluent.connect.hdfs.filter.TopicPartitionCommittedFileFilter; -import io.confluent.connect.hdfs.partitioner.Partitioner; -import io.confluent.connect.hdfs.storage.Storage; -import io.confluent.connect.hdfs.storage.StorageFactory; -import io.confluent.connect.hdfs.wal.WAL; - import java.util.ArrayList; -import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import io.confluent.connect.hdfs.DataWriter; +import io.confluent.connect.hdfs.FileUtils; +import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; +import io.confluent.connect.hdfs.TestWithMiniDFSCluster; +import io.confluent.connect.hdfs.storage.Storage; +import io.confluent.connect.hdfs.storage.StorageFactory; +import io.confluent.connect.hdfs.wal.WAL; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -49,50 +42,28 @@ public class DataWriterAvroTest extends 
TestWithMiniDFSCluster { - private static final String extension = ".avro"; - private static final String ZERO_PAD_FMT = "%010d"; - private SchemaFileReader schemaFileReader = new AvroFileReader(avroData); - + @Before + public void setUp() throws Exception { + super.setUp(); + schemaFileReader = new AvroFileReader(avroData); + extension = ".avro"; + } + @Test public void testWriteRecord() throws Exception { DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); - Partitioner partitioner = hdfsWriter.getPartitioner(); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Collection sinkRecords = new ArrayList<>(); - for (long offset = 0; offset < 7; offset++) { - SinkRecord sinkRecord = - new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset); + List sinkRecords = createSinkRecords(7); - sinkRecords.add(sinkRecord); - } hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - String encodedPartition = "partition=" + String.valueOf(PARTITION); - String directory = partitioner.generatePartitionedPath(TOPIC, encodedPartition); - // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close - long[] validOffsets = {-1, 2, 5}; - for (int i = 1; i < validOffsets.length; i++) { - long startOffset = validOffsets[i - 1] + 1; - long endOffset = validOffsets[i]; - Path path = - new Path(FileUtils - .committedFileName(url, topicsDir, directory, TOPIC_PARTITION, startOffset, - endOffset, extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(size, records.size()); - for (Object avroRecord : records) { - assertEquals(avroData.fromConnectData(schema, record), avroRecord); - } - } + long[] validOffsets = {0, 3, 6}; + verify(sinkRecords, validOffsets); } @Test @@ -103,99 +74,93 @@ public void testRecovery() throws Exception { Class storageClass = (Class) Class.forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG)); Storage storage = StorageFactory.createStorage(storageClass, connectorConfig, conf, url); + // Storage storage = StorageFactory.createStorage(storageClass, conf, url); + DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); WAL wal = storage.wal(logsDir, TOPIC_PARTITION); wal.append(WAL.beginMarker, ""); - Set committedFiles = new HashSet<>(); - - String directory = TOPIC + "/" + "partition=" + String.valueOf(PARTITION); for (int i = 0; i < 5; ++i) { long startOffset = i * 10; long endOffset = (i + 1) * 10 - 1; - String tempfile = FileUtils.tempFileName(url, topicsDir, directory, extension); + String tempfile = FileUtils.tempFileName(url, topicsDir, getDirectory(), extension); fs.createNewFile(new Path(tempfile)); - String committedFile = FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, startOffset, - endOffset, extension, ZERO_PAD_FMT); - committedFiles.add(committedFile); + String committedFile = FileUtils.committedFileName(url, topicsDir, getDirectory(), TOPIC_PARTITION, startOffset, + endOffset, extension, zeroPadFormat); wal.append(tempfile, committedFile); } wal.append(WAL.endMarker, ""); wal.close(); - DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); hdfsWriter.recover(TOPIC_PARTITION); Map offsets = context.offsets(); 
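The 50L asserted just below follows from the write-ahead log contents created above: file i spans offsets i * 10 through (i + 1) * 10 - 1, so the five committed files end at offset 49 and recovery resumes at offset 50. The three records then written at offsets 50..52 commit one more file, which is why the expected boundaries later become {0, 10, 20, 30, 40, 50, 53}. A minimal arithmetic sketch:

    public class RecoveryOffsetExample {
      public static void main(String[] args) {
        int files = 5;
        int recordsPerFile = 10;
        long nextOffset = 0;
        for (int i = 0; i < files; ++i) {
          long startOffset = i * recordsPerFile;           // 0, 10, 20, 30, 40
          long endOffset = (i + 1) * recordsPerFile - 1;   // 9, 19, 29, 39, 49
          nextOffset = endOffset + 1;                      // offset to resume from
        }
        System.out.println(nextOffset);                    // 50
        // Writing three more records at offsets 50..52 yields one more committed file,
        // so the exclusive boundaries become {0, 10, 20, 30, 40, 50, 53}.
      }
    }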
assertTrue(offsets.containsKey(TOPIC_PARTITION)); assertEquals(50L, (long) offsets.get(TOPIC_PARTITION)); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - // Need enough records to trigger file rotation - ArrayList sinkRecords = new ArrayList<>(); - for (int i = 0; i < 3; i++) - sinkRecords.add( - new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 50 + i)); + List sinkRecords = createSinkRecords(3, 50); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - committedFiles.add(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, - 50, 52, extension, ZERO_PAD_FMT)); - FileStatus[] statuses = fs.listStatus(new Path(FileUtils.directoryName(url, topicsDir, directory)), - new TopicPartitionCommittedFileFilter(TOPIC_PARTITION)); - assertEquals(committedFiles.size(), statuses.length); - for (FileStatus status : statuses) { - assertTrue(committedFiles.contains(status.getPath().toString())); - } + long[] validOffsets = {0, 10, 20, 30, 40, 50, 53}; + verifyFileListing(validOffsets, Collections.singleton(new TopicPartition(TOPIC, PARTITION))); } @Test public void testWriteRecordMultiplePartitions() throws Exception { DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); for (TopicPartition tp: assignment) { hdfsWriter.recover(tp); } - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - Collection sinkRecords = new ArrayList<>(); - for (TopicPartition tp: assignment) { - for (long offset = 0; offset < 7; offset++) { - SinkRecord sinkRecord = - new SinkRecord(tp.topic(), tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } - } + List sinkRecords = createSinkRecords(7, 0, assignment); + hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close - long[] validOffsets = {-1, 2, 5}; - - for (TopicPartition tp : assignment) { - String directory = tp.topic() + "/" + "partition=" + String.valueOf(tp.partition()); - for (int j = 1; j < validOffsets.length; ++j) { - long startOffset = validOffsets[j - 1] + 1; - long endOffset = validOffsets[j]; - Path path = new Path( - FileUtils.committedFileName(url, topicsDir, directory, tp, startOffset, endOffset, - extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(records.size(), size); - for (Object avroRecord : records) { - assertEquals(avroRecord, avroData.fromConnectData(schema, record)); - } - } + long[] validOffsets = {0, 3, 6}; + verify(sinkRecords, validOffsets, assignment); + } + + @Test + public void testWriteInterleavedRecordsInMultiplePartitions() throws Exception { + DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); + + for (TopicPartition tp: assignment) { + hdfsWriter.recover(tp); } + + List sinkRecords = createSinkRecordsInterleaved(7 * assignment.size(), 0, assignment); + + hdfsWriter.write(sinkRecords); + hdfsWriter.close(assignment); + hdfsWriter.stop(); + + long[] validOffsets = {0, 3, 6}; + verify(sinkRecords, validOffsets, assignment); + } + + @Test + public void testWriteInterleavedRecordsInMultiplePartitionsNonZeroInitialOffset() throws Exception { + DataWriter hdfsWriter = new 
DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); + + List sinkRecords = createSinkRecordsInterleaved(7 * assignment.size(), 9, assignment); + + hdfsWriter.write(sinkRecords); + hdfsWriter.close(assignment); + hdfsWriter.stop(); + + long[] validOffsets = {9, 12, 15}; + verify(sinkRecords, validOffsets, assignment); } @Test @@ -206,7 +171,7 @@ public void testGetPreviousOffsets() throws Exception { for (int i = 0; i < startOffsets.length; ++i) { Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, startOffsets[i], - endOffsets[i], extension, ZERO_PAD_FMT)); + endOffsets[i], extension, zeroPadFormat)); fs.createNewFile(path); } Path path = new Path(FileUtils.tempFileName(url, topicsDir, directory, extension)); @@ -229,65 +194,33 @@ public void testGetPreviousOffsets() throws Exception { } @Test - public void testWriteRecordNonZeroInitailOffset() throws Exception { + public void testWriteRecordNonZeroInitialOffset() throws Exception { DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); - Partitioner partitioner = hdfsWriter.getPartitioner(); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Collection sinkRecords = new ArrayList<>(); - for (long offset = 3; offset < 10; offset++) { - SinkRecord sinkRecord = - new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } + List sinkRecords = createSinkRecords(7, 3); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - String directory = partitioner.generatePartitionedPath(TOPIC, "partition=" + String.valueOf(PARTITION)); - // Last file (offset 9) doesn't satisfy size requirement and gets discarded on close - long[] validOffsets = {2, 5, 8}; - for (int i = 1; i < validOffsets.length; i++) { - long startOffset = validOffsets[i - 1] + 1; - long endOffset = validOffsets[i]; - Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory, - TOPIC_PARTITION, startOffset, endOffset, - extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(size, records.size()); - for (Object avroRecord : records) { - assertEquals(avroData.fromConnectData(schema, record), avroRecord); - } - } + long[] validOffsets = {3, 6, 9}; + verify(sinkRecords, validOffsets); } @Test public void testRebalance() throws Exception { DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); // Initial assignment is {TP1, TP2} for (TopicPartition tp: assignment) { hdfsWriter.recover(tp); } - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - Collection sinkRecords = new ArrayList<>(); - for (TopicPartition tp: assignment) { - for (long offset = 0; offset < 7; offset++) { - SinkRecord sinkRecord = - new SinkRecord(tp.topic(), tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } - } + List sinkRecords = createSinkRecords(7, 0, assignment); hdfsWriter.write(sinkRecords); Set oldAssignment = new HashSet<>(assignment); @@ -305,72 +238,26 @@ public void testRebalance() throws Exception { assertNotNull(hdfsWriter.getBucketWriter(TOPIC_PARTITION3)); // Last file (offset 6) doesn't satisfy size requirement and gets 
discarded on close - long[] validOffsetsTopicPartition2 = {-1, 2, 5}; - String directory2 = TOPIC + "/" + "partition=" + String.valueOf(PARTITION2); - for (int j = 1; j < validOffsetsTopicPartition2.length; ++j) { - long startOffset = validOffsetsTopicPartition2[j - 1] + 1; - long endOffset = validOffsetsTopicPartition2[j]; - Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory2, - TOPIC_PARTITION2, startOffset, endOffset, - extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(records.size(), size); - for (Object avroRecord : records) { - assertEquals(avroData.fromConnectData(schema, record), avroRecord); - } - } + long[] validOffsetsTopicPartition2 = {0, 3, 6}; + verify(sinkRecords, validOffsetsTopicPartition2, Collections.singleton(TOPIC_PARTITION2), true); - sinkRecords.clear(); - for (TopicPartition tp: assignment) { - // Message offsets start at 6 because we discarded the in-progress temp file on rebalance - for (long offset = 6; offset < 10; offset++) { - SinkRecord sinkRecord = - new SinkRecord(tp.topic(), tp.partition(), Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } - } + // Message offsets start at 6 because we discarded the in-progress temp file on re-balance + sinkRecords = createSinkRecords(3, 6, assignment); hdfsWriter.write(sinkRecords); hdfsWriter.close(newAssignment); hdfsWriter.stop(); // Last file (offset 9) doesn't satisfy size requirement and gets discarded on close - long[] validOffsetsTopicPartition1 = {5, 8}; - String directory1 = TOPIC + "/" + "partition=" + String.valueOf(PARTITION); - for (int j = 1; j < validOffsetsTopicPartition1.length; ++j) { - long startOffset = validOffsetsTopicPartition1[j - 1] + 1; - long endOffset = validOffsetsTopicPartition1[j]; - Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory1 , - TOPIC_PARTITION, startOffset, endOffset, - extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(records.size(), size); - for (Object avroRecord : records) { - assertEquals(avroData.fromConnectData(schema, record), avroRecord); - } - } + long[] validOffsetsTopicPartition1 = {6, 9}; + verify(sinkRecords, validOffsetsTopicPartition1, Collections.singleton(TOPIC_PARTITION), true); + + long[] validOffsetsTopicPartition3 = {6, 9}; + verify(sinkRecords, validOffsetsTopicPartition3, Collections.singleton(TOPIC_PARTITION3), true); - long[] validOffsetsTopicPartition3 = {5, 8}; - String directory3 = TOPIC + "/" + "partition=" + String.valueOf(PARTITION3); - for (int j = 1; j < validOffsetsTopicPartition3.length; ++j) { - long startOffset = validOffsetsTopicPartition3[j - 1] + 1; - long endOffset = validOffsetsTopicPartition3[j]; - Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory3, - TOPIC_PARTITION3, startOffset, endOffset, - extension, ZERO_PAD_FMT)); - Collection records = schemaFileReader.readData(conf, path); - long size = endOffset - startOffset + 1; - assertEquals(records.size(), size); - for (Object avroRecord : records) { - assertEquals(avroData.fromConnectData(schema, record), avroRecord); - } - } assignment = oldAssignment; } - @Test public void testProjectBackWard() throws Exception { Map props = createProps(); @@ -379,53 +266,16 @@ public void testProjectBackWard() throws Exception { HdfsSinkConnectorConfig connectorConfig = new 
HdfsSinkConnectorConfig(props); DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Schema newSchema = createNewSchema(); - Struct newRecord = createNewRecord(newSchema); - - Collection sinkRecords = new ArrayList<>(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 0L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 1L)); + List sinkRecords = createSinkRecordsWithAlternatingSchemas(7, 0); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - - String DIRECTORY = TOPIC + "/" + "partition=" + String.valueOf(PARTITION); - Path path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, - 0L, 1L, extension, ZERO_PAD_FMT)); - ArrayList records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(2, records.size()); - - assertEquals(avroData.fromConnectData(newSchema, newRecord), records.get(0)); - - Object projected = SchemaProjector.project(schema, record, newSchema); - assertEquals(avroData.fromConnectData(newSchema, projected), records.get(1)); - - hdfsWriter = new DataWriter(connectorConfig, context, avroData); - hdfsWriter.recover(TOPIC_PARTITION); - - sinkRecords.clear(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 2L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, - newRecord, 3L)); - - hdfsWriter.write(sinkRecords); - hdfsWriter.close(assignment); - hdfsWriter.stop(); - - path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, 2L, - 3L, extension, ZERO_PAD_FMT)); - records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(2, records.size()); - - assertEquals(avroData.fromConnectData(newSchema, projected), records.get(0)); - assertEquals(avroData.fromConnectData(newSchema, newRecord), records.get(1)); + long[] validOffsets = {0, 1, 3, 5, 7}; + verify(sinkRecords, validOffsets); } @Test @@ -435,58 +285,17 @@ public void testProjectNone() throws Exception { HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props); DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Schema newSchema = createNewSchema(); - Struct newRecord = createNewRecord(newSchema); - - Collection sinkRecords = new ArrayList<>(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 0L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 1L)); - // Include one more to get to forced file rotation - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 2L)); + List sinkRecords = createSinkRecordsWithAlternatingSchemas(7, 0); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - String DIRECTORY = TOPIC + "/" + "partition=" + String.valueOf(PARTITION); - Path path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, - 0L, 0L, extension, ZERO_PAD_FMT)); - ArrayList records = (ArrayList) 
schemaFileReader.readData(conf, path); - assertEquals(1, records.size()); - - - assertEquals(avroData.fromConnectData(newSchema, newRecord), records.get(0)); - - path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, 1L, - 2L, extension, ZERO_PAD_FMT)); - records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(avroData.fromConnectData(schema, record), records.get(0)); - assertEquals(avroData.fromConnectData(schema, record), records.get(1)); - - hdfsWriter = new DataWriter(connectorConfig, context, avroData); - hdfsWriter.recover(TOPIC_PARTITION); - - sinkRecords.clear(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 3L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 4L)); - - hdfsWriter.write(sinkRecords); - hdfsWriter.close(assignment); - hdfsWriter.stop(); - - path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, 3L, - 4L, extension, ZERO_PAD_FMT)); - records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(2, records.size()); - - assertEquals(avroData.fromConnectData(newSchema, newRecord), records.get(0)); - assertEquals(avroData.fromConnectData(newSchema, newRecord), records.get(1)); + long[] validOffsets = {0, 1, 2, 3, 4, 5, 6}; + verify(sinkRecords, validOffsets); } @Test @@ -497,99 +306,44 @@ public void testProjectForward() throws Exception { HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props); DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Schema newSchema = createNewSchema(); - Struct newRecord = createNewRecord(newSchema); - - Collection sinkRecords = new ArrayList<>(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 0L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 1L)); - // Include one more to get to forced file rotation - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 2L)); - - hdfsWriter.write(sinkRecords); - hdfsWriter.close(assignment); - hdfsWriter.stop(); - - String DIRECTORY = TOPIC + "/" + "partition=" + String.valueOf(PARTITION); - Path path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, - 0L, 0L, extension, ZERO_PAD_FMT)); - ArrayList records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(1, records.size()); - - assertEquals(avroData.fromConnectData(newSchema, newRecord), records.get(0)); - - path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, 1L, - 2L, extension, ZERO_PAD_FMT)); - records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(avroData.fromConnectData(schema, record), records.get(0)); - assertEquals(avroData.fromConnectData(schema, record), records.get(1)); - - hdfsWriter = new DataWriter(connectorConfig, context, avroData); - hdfsWriter.recover(TOPIC_PARTITION); - - sinkRecords.clear(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 3L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, newSchema, newRecord, 4L)); + // By excluding the first element we get a list starting with 
record having the new schema. + List sinkRecords = createSinkRecordsWithAlternatingSchemas(8, 0).subList(1, 8); hdfsWriter.write(sinkRecords); hdfsWriter.close(assignment); hdfsWriter.stop(); - path = new Path(FileUtils.committedFileName(url, topicsDir, DIRECTORY, TOPIC_PARTITION, 3L, - 4L, extension, ZERO_PAD_FMT)); - records = (ArrayList) schemaFileReader.readData(conf, path); - assertEquals(2, records.size()); - - assertEquals(avroData.fromConnectData(schema, record), records.get(0)); - assertEquals(avroData.fromConnectData(schema, record), records.get(1)); + long[] validOffsets = {1, 2, 4, 6, 8}; + verify(sinkRecords, validOffsets); } @Test public void testProjectNoVersion() throws Exception { - Schema schemaNoVersion = SchemaBuilder.struct().name("record") - .field("boolean", Schema.BOOLEAN_SCHEMA) - .field("int", Schema.INT32_SCHEMA) - .field("long", Schema.INT64_SCHEMA) - .field("float", Schema.FLOAT32_SCHEMA) - .field("double", Schema.FLOAT64_SCHEMA) - .build(); - - Struct recordNoVersion = new Struct(schemaNoVersion); - recordNoVersion.put("boolean", true) - .put("int", 12) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); - Map props = createProps(); props.put(HdfsSinkConnectorConfig.SCHEMA_COMPATIBILITY_CONFIG, "BACKWARD"); HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props); DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Collection sinkRecords = new ArrayList<>(); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schemaNoVersion, recordNoVersion, 0L)); - sinkRecords.add(new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 1L)); + List sinkRecords = createSinkRecordsNoVersion(1, 0); + sinkRecords.addAll(createSinkRecordsWithAlternatingSchemas(7, 0)); try { hdfsWriter.write(sinkRecords); fail("Version is required for Backward compatibility."); } catch (RuntimeException e) { // expected + } finally { + hdfsWriter.close(assignment); + hdfsWriter.stop(); + long[] validOffsets = {}; + verify(Collections.emptyList(), validOffsets); } - hdfsWriter.close(assignment); - hdfsWriter.stop(); } @Test @@ -600,7 +354,7 @@ public void testFlushPartialFile() throws Exception { String FLUSH_SIZE_CONFIG = "10"; // send 1.5 * FLUSH_SIZE_CONFIG records - long NUMBER_OF_RECORD = Long.valueOf(FLUSH_SIZE_CONFIG) + Long.valueOf(FLUSH_SIZE_CONFIG) / 2; + int NUMBER_OF_RECORDS = Integer.valueOf(FLUSH_SIZE_CONFIG) + Integer.valueOf(FLUSH_SIZE_CONFIG) / 2; Map props = createProps(); props.put(HdfsSinkConnectorConfig.FLUSH_SIZE_CONFIG, FLUSH_SIZE_CONFIG); @@ -610,17 +364,10 @@ public void testFlushPartialFile() throws Exception { assignment.add(TOPIC_PARTITION); DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData); + partitioner = hdfsWriter.getPartitioner(); hdfsWriter.recover(TOPIC_PARTITION); - String key = "key"; - Schema schema = createSchema(); - Struct record = createRecord(schema); - - Collection sinkRecords = new ArrayList<>(); - for (long offset = 0; offset < NUMBER_OF_RECORD; offset++) { - SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset); - sinkRecords.add(sinkRecord); - } + List sinkRecords = createSinkRecords(NUMBER_OF_RECORDS); hdfsWriter.write(sinkRecords); // wait for rotation to happen @@ -634,7 +381,7 @@ public 
void testFlushPartialFile() throws Exception { Map committedOffsets = hdfsWriter.getCommittedOffsets(); assertTrue(committedOffsets.containsKey(TOPIC_PARTITION)); long previousOffset = committedOffsets.get(TOPIC_PARTITION); - assertEquals(NUMBER_OF_RECORD, previousOffset); + assertEquals(NUMBER_OF_RECORDS, previousOffset); hdfsWriter.close(assignment); hdfsWriter.stop(); diff --git a/src/test/java/io/confluent/connect/hdfs/avro/HiveIntegrationAvroTest.java b/src/test/java/io/confluent/connect/hdfs/avro/HiveIntegrationAvroTest.java index 1565836..bd6a983 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/HiveIntegrationAvroTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/HiveIntegrationAvroTest.java @@ -147,6 +147,54 @@ public void testHiveIntegrationAvro() throws Exception { assertEquals(expectedPartitions, partitions); } + @Test + public void testHiveIntegrationTopicWithDotsAvro() throws Exception { + assignment.add(TOPIC_WITH_DOTS_PARTITION); + + Map props = createProps(); + props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true"); + HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props); + + DataWriter hdfsWriter = new DataWriter(config, context, avroData); + hdfsWriter.recover(TOPIC_WITH_DOTS_PARTITION); + + String key = "key"; + Schema schema = createSchema(); + Struct record = createRecord(schema); + + Collection sinkRecords = new ArrayList<>(); + for (long offset = 0; offset < 7; offset++) { + SinkRecord sinkRecord = + new SinkRecord(TOPIC_WITH_DOTS, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset); + + sinkRecords.add(sinkRecord); + } + + hdfsWriter.write(sinkRecords); + hdfsWriter.close(assignment); + hdfsWriter.stop(); + + Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC_WITH_DOTS); + List expectedColumnNames = new ArrayList<>(); + for (Field field: schema.fields()) { + expectedColumnNames.add(field.name()); + } + + List actualColumnNames = new ArrayList<>(); + for (FieldSchema column: table.getSd().getCols()) { + actualColumnNames.add(column.getName()); + } + assertEquals(expectedColumnNames, actualColumnNames); + + List expectedPartitions = new ArrayList<>(); + String directory = TOPIC_WITH_DOTS + "/" + "partition=" + String.valueOf(PARTITION); + expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory)); + + List partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC_WITH_DOTS, (short)-1); + + assertEquals(expectedPartitions, partitions); + } + @Test public void testHiveIntegrationFieldPartitionerAvro() throws Exception { Map props = createProps(); diff --git a/src/test/java/io/confluent/connect/hdfs/avro/TopicPartitionWriterTest.java b/src/test/java/io/confluent/connect/hdfs/avro/TopicPartitionWriterTest.java index 4e4ef2c..2d5e21a 100644 --- a/src/test/java/io/confluent/connect/hdfs/avro/TopicPartitionWriterTest.java +++ b/src/test/java/io/confluent/connect/hdfs/avro/TopicPartitionWriterTest.java @@ -28,6 +28,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -36,7 +37,6 @@ import io.confluent.connect.hdfs.Format; import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; import io.confluent.connect.hdfs.RecordWriterProvider; -import io.confluent.connect.hdfs.SchemaFileReader; import io.confluent.connect.hdfs.TestWithMiniDFSCluster; import io.confluent.connect.hdfs.TopicPartitionWriter; import io.confluent.connect.hdfs.filter.CommittedFileFilter; @@ 
-52,13 +52,8 @@ import static org.junit.Assert.assertTrue; public class TopicPartitionWriterTest extends TestWithMiniDFSCluster { - // The default based on default configuration of 10 - private static final String ZERO_PAD_FMT = "%010d"; - private RecordWriterProvider writerProvider; - private SchemaFileReader schemaFileReader; private Storage storage; - private static String extension; @Before public void setUp() throws Exception { @@ -86,11 +81,11 @@ public void testWriteRecordDefaultWithPadding() throws Exception { TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter( TOPIC_PARTITION, storage, writerProvider, partitioner, connectorConfig, context, avroData); - String key = "key"; Schema schema = createSchema(); - Struct[] records = createRecords(schema); - - Collection sinkRecords = createSinkRecords(records, key, schema); + List records = createRecordBatches(schema, 3, 3); + // Add a single records at the end of the batches sequence. Total records: 10 + records.add(createRecord(schema)); + List sinkRecords = createSinkRecords(records, schema); for (SinkRecord record : sinkRecords) { topicPartitionWriter.buffer(record); @@ -107,7 +102,8 @@ public void testWriteRecordDefaultWithPadding() throws Exception { "/" + TOPIC + "+" + PARTITION + "+03+05" + extension)); expectedFiles.add(new Path(url + "/" + topicsDir + "/" + TOPIC + "/partition=" + PARTITION + "/" + TOPIC + "+" + PARTITION + "+06+08" + extension)); - verify(expectedFiles, records, schema); + int expectedBatchSize = 3; + verify(expectedFiles, expectedBatchSize, records, schema); } @@ -122,11 +118,17 @@ public void testWriteRecordFieldPartitioner() throws Exception { TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter( TOPIC_PARTITION, storage, writerProvider, partitioner, connectorConfig, context, avroData); - String key = "key"; Schema schema = createSchema(); - Struct[] records = createRecords(schema); + List records = new ArrayList<>(); + for (int i = 16; i < 19; ++i) { + for (int j = 0; j < 3; ++j) { + records.add(createRecord(schema, i, 12.2f)); - Collection sinkRecords = createSinkRecords(records, key, schema); + } + } + // Add a single records at the end of the batches sequence. 
Total records: 10 + records.add(createRecord(schema)); + List sinkRecords = createSinkRecords(records, schema); for (SinkRecord record : sinkRecords) { topicPartitionWriter.buffer(record); @@ -136,17 +138,17 @@ public void testWriteRecordFieldPartitioner() throws Exception { topicPartitionWriter.write(); topicPartitionWriter.close(); - String directory1 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(16)); String directory2 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(17)); String directory3 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(18)); Set expectedFiles = new HashSet<>(); - expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory1, TOPIC_PARTITION, 0, 2, extension, ZERO_PAD_FMT))); - expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory2, TOPIC_PARTITION, 3, 5, extension, ZERO_PAD_FMT))); - expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory3, TOPIC_PARTITION, 6, 8, extension, ZERO_PAD_FMT))); + expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory1, TOPIC_PARTITION, 0, 2, extension, zeroPadFormat))); + expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory2, TOPIC_PARTITION, 3, 5, extension, zeroPadFormat))); + expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory3, TOPIC_PARTITION, 6, 8, extension, zeroPadFormat))); - verify(expectedFiles, records, schema); + int expectedBatchSize = 3; + verify(expectedFiles, expectedBatchSize, records, schema); } @Test @@ -158,11 +160,11 @@ public void testWriteRecordTimeBasedPartition() throws Exception { TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter( TOPIC_PARTITION, storage, writerProvider, partitioner, connectorConfig, context, avroData); - String key = "key"; Schema schema = createSchema(); - Struct[] records = createRecords(schema); - - Collection sinkRecords = createSinkRecords(records, key, schema); + List records = createRecordBatches(schema, 3, 3); + // Add a single records at the end of the batches sequence. 
Total records: 10 + records.add(createRecord(schema)); + List sinkRecords = createSinkRecords(records, schema); for (SinkRecord record : sinkRecords) { topicPartitionWriter.buffer(record); @@ -172,7 +174,6 @@ public void testWriteRecordTimeBasedPartition() throws Exception { topicPartitionWriter.write(); topicPartitionWriter.close(); - long partitionDurationMs = (Long) config.get(HdfsSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG); String pathFormat = (String) config.get(HdfsSinkConnectorConfig.PATH_FORMAT_CONFIG); String timeZoneString = (String) config.get(HdfsSinkConnectorConfig.TIMEZONE_CONFIG); @@ -183,11 +184,12 @@ public void testWriteRecordTimeBasedPartition() throws Exception { String directory = partitioner.generatePartitionedPath(TOPIC, encodedPartition); Set expectedFiles = new HashSet<>(); - expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 0, 2, extension, ZERO_PAD_FMT))); - expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 3, 5, extension, ZERO_PAD_FMT))); - expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 6, 8, extension, ZERO_PAD_FMT))); + expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 0, 2, extension, zeroPadFormat))); + expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 3, 5, extension, zeroPadFormat))); + expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 6, 8, extension, zeroPadFormat))); - verify(expectedFiles, records, schema); + int expectedBatchSize = 3; + verify(expectedFiles, expectedBatchSize, records, schema); } private Map createConfig() { @@ -214,51 +216,7 @@ private void createLogsDir(String url, String logsDir) throws IOException { } } - private Struct[] createRecords(Schema schema) { - Struct record1 = new Struct(schema) - .put("boolean", true) - .put("int", 16) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); - - Struct record2 = new Struct(schema) - .put("boolean", true) - .put("int", 17) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); - - Struct record3 = new Struct(schema) - .put("boolean", true) - .put("int", 18) - .put("long", 12L) - .put("float", 12.2f) - .put("double", 12.2); - - ArrayList records = new ArrayList<>(); - records.add(record1); - records.add(record2); - records.add(record3); - return records.toArray(new Struct[records.size()]); - } - - - private ArrayList createSinkRecords(Struct[] records, String key, Schema schema) { - ArrayList sinkRecords = new ArrayList<>(); - long offset = 0; - for (Struct record : records) { - for (long count = 0; count < 3; count++) { - SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, - offset + count); - sinkRecords.add(sinkRecord); - } - offset = offset + 3; - } - return sinkRecords; - } - - private void verify(Set expectedFiles, Struct[] records, Schema schema) throws IOException { + private void verify(Set expectedFiles, int expectedSize, List records, Schema schema) throws IOException { Path path = new Path(FileUtils.topicDirectory(url, topicsDir, TOPIC)); FileStatus[] statuses = FileUtils.traverse(storage, path, new CommittedFileFilter()); assertEquals(expectedFiles.size(), statuses.length); @@ -267,11 +225,11 @@ private void verify(Set expectedFiles, Struct[] records, Schema schema) th Path filePath = status.getPath(); 
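The expected paths above combine the topic, the Kafka partition, and the zero-padded start and end offsets of each committed chunk; zeroPadFormat defaults to "%010d" as noted earlier. The helper below is a hypothetical stand-in used only to illustrate how such a name is composed; the real, fully qualified path comes from FileUtils.committedFileName in the connector:

    public class CommittedFileNameExample {
      // Hypothetical illustration of the expected base name; the connector's
      // FileUtils.committedFileName produces the actual committed file path.
      static String exampleName(String topic, int partition, long startOffset, long endOffset,
                                String extension, String zeroPadFormat) {
        return topic + "+" + partition
            + "+" + String.format(zeroPadFormat, startOffset)
            + "+" + String.format(zeroPadFormat, endOffset)
            + extension;
      }

      public static void main(String[] args) {
        // Offsets 0..2 of partition 12 with the default ten-digit padding:
        System.out.println(exampleName("topic", 12, 0, 2, ".avro", "%010d"));
        // -> topic+12+0000000000+0000000002.avro
      }
    }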
       assertTrue(expectedFiles.contains(status.getPath()));
       Collection<Object> avroRecords = schemaFileReader.readData(conf, filePath);
-      assertEquals(3, avroRecords.size());
-      for (Object avroRecord: avroRecords) {
-        assertEquals(avroData.fromConnectData(schema, records[index]), avroRecord);
+      assertEquals(expectedSize, avroRecords.size());
+      for (Object avroRecord : avroRecords) {
+        assertEquals(avroData.fromConnectData(schema, records.get(index++)), avroRecord);
       }
-      index++;
     }
   }
+  }
diff --git a/src/test/java/io/confluent/connect/hdfs/parquet/DataWriterParquetTest.java b/src/test/java/io/confluent/connect/hdfs/parquet/DataWriterParquetTest.java
index 0a0c399..ac4ae99 100644
--- a/src/test/java/io/confluent/connect/hdfs/parquet/DataWriterParquetTest.java
+++ b/src/test/java/io/confluent/connect/hdfs/parquet/DataWriterParquetTest.java
@@ -15,29 +15,26 @@
 package io.confluent.connect.hdfs.parquet;
 
-import org.apache.hadoop.fs.Path;
-import org.apache.kafka.connect.data.Schema;
-import org.apache.kafka.connect.data.Struct;
 import org.apache.kafka.connect.sink.SinkRecord;
+import org.junit.Before;
 import org.junit.Test;
 
-import java.util.ArrayList;
-import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 
 import io.confluent.connect.hdfs.DataWriter;
-import io.confluent.connect.hdfs.FileUtils;
 import io.confluent.connect.hdfs.HdfsSinkConnectorConfig;
-import io.confluent.connect.hdfs.SchemaFileReader;
 import io.confluent.connect.hdfs.TestWithMiniDFSCluster;
 import io.confluent.connect.hdfs.partitioner.Partitioner;
 
-import static org.junit.Assert.assertEquals;
-
 public class DataWriterParquetTest extends TestWithMiniDFSCluster {
-  private static final String ZERO_PAD_FMT = "%010d";
-  private static final String extension = ".parquet";
-  private final SchemaFileReader schemaFileReader = new ParquetFileReader(avroData);
+
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    schemaFileReader = new ParquetFileReader(avroData);
+    extension = ".parquet";
+  }
 
   @Override
   protected Map<String, String> createProps() {
@@ -49,41 +46,17 @@ protected Map<String, String> createProps() {
   @Test
   public void testWriteRecord() throws Exception {
     DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
-    Partitioner partitioner = hdfsWriter.getPartitioner();
+    partitioner = hdfsWriter.getPartitioner();
     hdfsWriter.recover(TOPIC_PARTITION);
 
-    String key = "key";
-    Schema schema = createSchema();
-    Struct record = createRecord(schema);
+    List<SinkRecord> sinkRecords = createSinkRecords(7);
 
-    Collection<SinkRecord> sinkRecords = new ArrayList<>();
-    for (long offset = 0; offset < 7; offset++) {
-      SinkRecord sinkRecord =
-          new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
-
-      sinkRecords.add(sinkRecord);
-    }
     hdfsWriter.write(sinkRecords);
     hdfsWriter.close(assignment);
     hdfsWriter.stop();
 
-    String encodedPartition = "partition=" + String.valueOf(PARTITION);
-    String directory = partitioner.generatePartitionedPath(TOPIC, encodedPartition);
-    // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close
-    long[] validOffsets = {-1, 2, 5};
-    for (int i = 1; i < validOffsets.length; i++) {
-      long startOffset = validOffsets[i - 1] + 1;
-      long endOffset = validOffsets[i];
-      Path path = new Path(
-          FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, startOffset,
-              endOffset, extension, ZERO_PAD_FMT));
-      Collection<Object> records = schemaFileReader.readData(conf, path);
-      long size = endOffset - startOffset + 1;
-      assertEquals(size, records.size());
-      for (Object avroRecord : records) {
-        assertEquals(avroData.fromConnectData(schema, record), avroRecord);
-      }
-    }
+    long[] validOffsets = {0, 3, 6};
+    verify(sinkRecords, validOffsets);
   }
 }
diff --git a/src/test/java/io/confluent/connect/hdfs/parquet/HiveIntegrationParquetTest.java b/src/test/java/io/confluent/connect/hdfs/parquet/HiveIntegrationParquetTest.java
index 6bf4a32..561f298 100644
--- a/src/test/java/io/confluent/connect/hdfs/parquet/HiveIntegrationParquetTest.java
+++ b/src/test/java/io/confluent/connect/hdfs/parquet/HiveIntegrationParquetTest.java
@@ -25,7 +25,7 @@
 import org.junit.Test;
 
 import java.util.ArrayList;
-import java.util.Collection;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
@@ -58,16 +58,7 @@ public void testSyncWithHiveParquet() throws Exception {
     DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
     hdfsWriter.recover(TOPIC_PARTITION);
-    String key = "key";
-    Schema schema = createSchema();
-    Struct record = createRecord(schema);
-
-    Collection<SinkRecord> sinkRecords = new ArrayList<>();
-    for (long offset = 0; offset < 7; offset++) {
-      SinkRecord sinkRecord =
-          new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
-      sinkRecords.add(sinkRecord);
-    }
+    List<SinkRecord> sinkRecords = createSinkRecords(7);
 
     hdfsWriter.write(sinkRecords);
     hdfsWriter.close(assignment);
@@ -80,14 +71,18 @@ public void testSyncWithHiveParquet() throws Exception {
     hdfsWriter = new DataWriter(config, context, avroData);
     hdfsWriter.syncWithHive();
 
+    Schema schema = createSchema();
+    Struct expectedRecord = createRecord(schema);
+    List<String> expectedResult = new ArrayList<>();
     List<String> expectedColumnNames = new ArrayList<>();
-    for (Field field: schema.fields()) {
+    for (Field field : schema.fields()) {
       expectedColumnNames.add(field.name());
+      expectedResult.add(String.valueOf(expectedRecord.get(field.name())));
     }
 
     Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
     List<String> actualColumnNames = new ArrayList<>();
-    for (FieldSchema column: table.getSd().getCols()) {
+    for (FieldSchema column : table.getSd().getCols()) {
       actualColumnNames.add(column.getName());
     }
     assertEquals(expectedColumnNames, actualColumnNames);
@@ -113,29 +108,21 @@ public void testHiveIntegrationParquet() throws Exception {
     DataWriter hdfsWriter = new DataWriter(config, context, avroData);
     hdfsWriter.recover(TOPIC_PARTITION);
 
-    String key = "key";
-    Schema schema = createSchema();
-    Struct record = createRecord(schema);
-
-    Collection<SinkRecord> sinkRecords = new ArrayList<>();
-    for (long offset = 0; offset < 7; offset++) {
-      SinkRecord sinkRecord =
-          new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
+    List<SinkRecord> sinkRecords = createSinkRecords(7);
 
-      sinkRecords.add(sinkRecord);
-    }
     hdfsWriter.write(sinkRecords);
     hdfsWriter.close(assignment);
     hdfsWriter.stop();
 
+    Schema schema = createSchema();
     Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
     List<String> expectedColumnNames = new ArrayList<>();
-    for (Field field: schema.fields()) {
+    for (Field field : schema.fields()) {
       expectedColumnNames.add(field.name());
     }
 
     List<String> actualColumnNames = new ArrayList<>();
-    for (FieldSchema column: table.getSd().getCols()) {
+    for (FieldSchema column : table.getSd().getCols()) {
       actualColumnNames.add(column.getName());
     }
     assertEquals(expectedColumnNames, actualColumnNames);
@@ -159,20 +146,9 @@ public void testHiveIntegrationFieldPartitionerParquet() throws Exception {
     HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);
     DataWriter hdfsWriter = new DataWriter(config, context, avroData);
 
-    String key = "key";
     Schema schema = createSchema();
-
-    Struct[] records = createRecords(schema);
-    ArrayList<SinkRecord> sinkRecords = new ArrayList<>();
-    long offset = 0;
-    for (Struct record : records) {
-      for (long count = 0; count < 3; count++) {
-        SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record,
-            offset + count);
-        sinkRecords.add(sinkRecord);
-      }
-      offset = offset + 3;
-    }
+    List<Struct> records = createRecordBatches(schema, 3, 3);
+    List<SinkRecord> sinkRecords = createSinkRecords(records, schema);
 
     hdfsWriter.write(sinkRecords);
     hdfsWriter.close(assignment);
@@ -181,17 +157,16 @@ public void testHiveIntegrationFieldPartitionerParquet() throws Exception {
     Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
     List<String> expectedColumnNames = new ArrayList<>();
-    for (Field field: schema.fields()) {
+    for (Field field : schema.fields()) {
      expectedColumnNames.add(field.name());
     }
 
     List<String> actualColumnNames = new ArrayList<>();
-    for (FieldSchema column: table.getSd().getCols()) {
+    for (FieldSchema column : table.getSd().getCols()) {
       actualColumnNames.add(column.getName());
     }
     assertEquals(expectedColumnNames, actualColumnNames);
-
     String partitionFieldName = config.getString(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG);
     String directory1 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(16);
     String directory2 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(17);
@@ -206,20 +181,25 @@ public void testHiveIntegrationFieldPartitionerParquet() throws Exception {
     assertEquals(expectedPartitions, partitions);
 
-    ArrayList<String[]> expectedResult = new ArrayList<>();
-    for (int i = 16; i <= 18; ++i) {
-      String[] part = {"true", String.valueOf(i), "12", "12.2", "12.2"};
+    List<List<String>> expectedResults = new ArrayList<>();
+    for (int i = 0; i < 3; ++i) {
       for (int j = 0; j < 3; ++j) {
-        expectedResult.add(part);
+        List<String> result = new ArrayList<>();
+        for (Field field : schema.fields()) {
+          result.add(String.valueOf(records.get(i).get(field.name())));
+        }
+        expectedResults.add(result);
       }
     }
+
     String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + TOPIC);
     String[] rows = result.split("\n");
     assertEquals(9, rows.length);
     for (int i = 0; i < rows.length; ++i) {
       String[] parts = HiveTestUtils.parseOutput(rows[i]);
-      for (int j = 0; j < expectedResult.get(i).length; ++j) {
-        assertEquals(expectedResult.get(i)[j], parts[j]);
+      int j = 0;
+      for (String expectedValue : expectedResults.get(i)) {
+        assertEquals(expectedValue, parts[j++]);
      }
     }
   }
@@ -235,20 +215,9 @@ public void testHiveIntegrationTimeBasedPartitionerParquet() throws Exception {
     HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);
     DataWriter hdfsWriter = new DataWriter(config, context, avroData);
 
-    String key = "key";
     Schema schema = createSchema();
-
-    Struct[] records = createRecords(schema);
-    ArrayList<SinkRecord> sinkRecords = new ArrayList<>();
-    long offset = 0;
-    for (Struct record : records) {
-      for (long count = 0; count < 3; count++) {
-        SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record,
-            offset + count);
-        sinkRecords.add(sinkRecord);
-      }
-      offset = offset + 3;
-    }
+    List<Struct> records = createRecordBatches(schema, 3, 3);
+    List<SinkRecord> sinkRecords = createSinkRecords(records, schema);
 
     hdfsWriter.write(sinkRecords);
     hdfsWriter.close(assignment);
@@ -257,12 +226,12 @@ public void testHiveIntegrationTimeBasedPartitionerParquet() throws Exception {
     Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
     List<String> expectedColumnNames = new ArrayList<>();
-    for (Field field: schema.fields()) {
+    for (Field field : schema.fields()) {
       expectedColumnNames.add(field.name());
     }
 
     List<String> actualColumnNames = new ArrayList<>();
-    for (FieldSchema column: table.getSd().getCols()) {
+    for (FieldSchema column : table.getSd().getCols()) {
       actualColumnNames.add(column.getName());
     }
     assertEquals(expectedColumnNames, actualColumnNames);
@@ -281,17 +250,23 @@ public void testHiveIntegrationTimeBasedPartitionerParquet() throws Exception {
     ArrayList<String> partitionFields = new ArrayList<>();
     String[] groups = encodedPartition.split("/");
-    for (String group: groups) {
+    for (String group : groups) {
       String field = group.split("=")[1];
       partitionFields.add(field);
     }
 
-    ArrayList<String[]> expectedResult = new ArrayList<>();
-    for (int i = 16; i <= 18; ++i) {
-      String[] part = {"true", String.valueOf(i), "12", "12.2", "12.2",
-                       partitionFields.get(0), partitionFields.get(1), partitionFields.get(2)};
-      for (int j = 0; j < 3; ++j) {
-        expectedResult.add(part);
+    List<List<String>> expectedResults = new ArrayList<>();
+    for (int j = 0; j < 3; ++j) {
+      for (int i = 0; i < 3; ++i) {
+        List<String> result = Arrays.asList("true",
+            String.valueOf(16 + i),
+            String.valueOf((long) (16 + i)),
+            String.valueOf(12.2f + i),
+            String.valueOf((double) (12.2f + i)),
+            partitionFields.get(0),
+            partitionFields.get(1),
+            partitionFields.get(2));
+        expectedResults.add(result);
       }
     }
 
@@ -300,38 +275,10 @@ public void testHiveIntegrationTimeBasedPartitionerParquet() throws Exception {
     assertEquals(9, rows.length);
     for (int i = 0; i < rows.length; ++i) {
       String[] parts = HiveTestUtils.parseOutput(rows[i]);
-      for (int j = 0; j < expectedResult.get(i).length; ++j) {
-        assertEquals(expectedResult.get(i)[j], parts[j]);
+      int j = 0;
+      for (String expectedValue : expectedResults.get(i)) {
+        assertEquals(expectedValue, parts[j++]);
       }
     }
   }
-
-  private Struct[] createRecords(Schema schema) {
-    Struct record1 = new Struct(schema)
-        .put("boolean", true)
-        .put("int", 16)
-        .put("long", 12L)
-        .put("float", 12.2f)
-        .put("double", 12.2);
-
-    Struct record2 = new Struct(schema)
-        .put("boolean", true)
-        .put("int", 17)
-        .put("long", 12L)
-        .put("float", 12.2f)
-        .put("double", 12.2);
-
-    Struct record3 = new Struct(schema)
-        .put("boolean", true)
-        .put("int", 18)
-        .put("long", 12L)
-        .put("float", 12.2f)
-        .put("double", 12.2);
-
-    ArrayList<Struct> records = new ArrayList<>();
-    records.add(record1);
-    records.add(record2);
-    records.add(record3);
-    return records.toArray(new Struct[records.size()]);
-  }
 }
diff --git a/src/test/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtilTest.java b/src/test/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtilTest.java
index 1df3455..2467cf9 100644
--- a/src/test/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtilTest.java
+++ b/src/test/java/io/confluent/connect/hdfs/parquet/ParquetHiveUtilTest.java
@@ -69,9 +69,12 @@ public void testCreateTable() throws Exception {
     String location = "partition=" + String.valueOf(PARTITION);
     hiveMetaStore.addPartition(hiveDatabase, TOPIC, location);
 
+    Struct expectedRecord = createRecord(schema);
+    List<String> expectedResult = new ArrayList<>();
     List<String> expectedColumnNames = new ArrayList<>();
-    for (Field field: schema.fields()) {
+    for (Field field : schema.fields()) {
       expectedColumnNames.add(field.name());
+      expectedResult.add(String.valueOf(expectedRecord.get(field.name())));
     }
 
     Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
@@ -85,15 +88,15 @@
     assertEquals(1, partitionCols.size());
     assertEquals("partition", partitionCols.get(0).getName());
 
-    String[] expectedResult = {"true", "12", "12", "12.2", "12.2", "12"};
-    String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + TOPIC);
+    String result = HiveTestUtils.runHive(hiveExec, "SELECT * from " + TOPIC);
     String[] rows = result.split("\n");
     // Only 6 of the 7 records should have been delivered due to flush_size = 3
     assertEquals(6, rows.length);
-    for (String row: rows) {
+    for (String row : rows) {
       String[] parts = HiveTestUtils.parseOutput(row);
-      for (int j = 0; j < expectedResult.length; ++j) {
-        assertEquals(expectedResult[j], parts[j]);
+      int j = 0;
+      for (String expectedValue : expectedResult) {
+        assertEquals(expectedValue, parts[j++]);
       }
     }
   }
@@ -108,9 +111,13 @@ public void testAlterSchema() throws Exception {
     String location = "partition=" + String.valueOf(PARTITION);
     hiveMetaStore.addPartition(hiveDatabase, TOPIC, location);
 
+    Schema newSchema = createNewSchema();
+    Struct expectedRecord = createRecord(newSchema);
+    List<String> expectedResult = new ArrayList<>();
     List<String> expectedColumnNames = new ArrayList<>();
-    for (Field field: schema.fields()) {
+    for (Field field : schema.fields()) {
       expectedColumnNames.add(field.name());
+      expectedResult.add(String.valueOf(expectedRecord.get(field.name())));
     }
 
     Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
@@ -121,19 +128,17 @@ public void testAlterSchema() throws Exception {
     assertEquals(expectedColumnNames, actualColumnNames);
 
-    Schema newSchema = createNewSchema();
-
     hive.alterSchema(hiveDatabase, TOPIC, newSchema);
 
-    String[] expectedResult = {"true", "12", "12", "12.2", "12.2", "NULL", "12"};
     String result = HiveTestUtils.runHive(hiveExec, "SELECT * from " + TOPIC);
     String[] rows = result.split("\n");
     // Only 6 of the 7 records should have been delivered due to flush_size = 3
     assertEquals(6, rows.length);
-    for (String row: rows) {
+    for (String row : rows) {
       String[] parts = HiveTestUtils.parseOutput(row);
-      for (int j = 0; j < expectedResult.length; ++j) {
-        assertEquals(expectedResult[j], parts[j]);
+      int j = 0;
+      for (String expectedValue : expectedResult) {
+        assertEquals(expectedValue, parts[j++]);
      }
    }
  }
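Note on the helpers used above: the refactored tests call createSinkRecords(...), createRecordBatches(...), and verify(sinkRecords, validOffsets), which this change set adds to the shared TestWithMiniDFSCluster base class in hunks that are not part of this excerpt. The sketch below is only an illustration of what such helpers could look like, inferred from the call sites shown here; the class name SinkRecordHelpersSketch, the constant values, and the record value choices are assumptions, not the connector's actual test code.

// Hypothetical sketch only: names, fields, and values are inferred from the call sites
// (createSinkRecords(7), createRecordBatches(schema, 3, 3), createSinkRecords(records, schema))
// and may differ from the real helpers in TestWithMiniDFSCluster.
import java.util.ArrayList;
import java.util.List;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;

abstract class SinkRecordHelpersSketch {
  protected static final String TOPIC = "test_hdfs"; // assumed
  protected static final int PARTITION = 12;         // assumed
  protected final String key = "key";

  // Schema with the field names the tests read back ("boolean", "int", "long", "float", "double").
  protected Schema createSchema() {
    return SchemaBuilder.struct().name("record")
        .field("boolean", Schema.BOOLEAN_SCHEMA)
        .field("int", Schema.INT32_SCHEMA)
        .field("long", Schema.INT64_SCHEMA)
        .field("float", Schema.FLOAT32_SCHEMA)
        .field("double", Schema.FLOAT64_SCHEMA)
        .build();
  }

  // `batches` batches of `batchSize` records; values start at 16/12.2, mirroring the
  // removed createRecords() helper and the expected values asserted in the Hive tests.
  protected List<Struct> createRecordBatches(Schema schema, int batchSize, int batches) {
    List<Struct> records = new ArrayList<>();
    for (int batch = 0; batch < batches; batch++) {
      for (int i = 0; i < batchSize; i++) {
        records.add(new Struct(schema)
            .put("boolean", true)
            .put("int", 16 + i)
            .put("long", (long) (16 + i))
            .put("float", 12.2f + i)
            .put("double", (double) (12.2f + i)));
      }
    }
    return records;
  }

  // One SinkRecord per Struct, at consecutive offsets starting from 0.
  protected List<SinkRecord> createSinkRecords(List<Struct> records, Schema schema) {
    List<SinkRecord> sinkRecords = new ArrayList<>();
    long offset = 0;
    for (Struct record : records) {
      sinkRecords.add(
          new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset++));
    }
    return sinkRecords;
  }

  // Convenience overload: `count` copies of a single record at offsets 0..count-1,
  // matching calls such as createSinkRecords(7).
  protected List<SinkRecord> createSinkRecords(int count) {
    Schema schema = createSchema();
    Struct record = createRecordBatches(schema, 1, 1).get(0);
    List<SinkRecord> sinkRecords = new ArrayList<>();
    for (long offset = 0; offset < count; offset++) {
      sinkRecords.add(
          new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset));
    }
    return sinkRecords;
  }
}

The verify(sinkRecords, validOffsets) call presumably centralizes the committed-file assertions that each format test previously repeated inline (reading each written file back with the configured schemaFileReader and comparing against the expected records), so a format-specific test like DataWriterParquetTest only needs to supply the expected flush boundaries.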