
Commit 02a6968

e2e-Record Splitter Test Scenarios

1 parent 0e7d7b9

23 files changed: 1195 additions & 1 deletion

pom.xml

Lines changed: 132 additions & 1 deletion
@@ -30,6 +30,7 @@
     <cdap.version>6.1.0-SNAPSHOT</cdap.version>
     <guava.version>19.0</guava.version>
     <exported-packages>io.cdap.plugin.record.splitter.*</exported-packages>
+    <testSourceLocation>${project.basedir}/src/test/java/</testSourceLocation>
   </properties>

   <repositories>
@@ -76,6 +77,7 @@
   </dependencies>

   <build>
+    <testSourceDirectory>${testSourceLocation}</testSourceDirectory>
     <pluginManagement>
       <plugins>
         <plugin>
@@ -160,6 +162,7 @@
           <exclude>**/org/apache/hadoop/**</exclude>
           <!-- exclude resource files -->
           <exclude>**/resources/**</exclude>
+          <exclude>src/e2e-test/features/**</exclude>
         </excludes>
       </configuration>
     </execution>
@@ -242,5 +245,133 @@
       </plugin>
     </plugins>
   </build>
-</project>

+  <profiles>
+    <profile>
+      <id>e2e-tests</id>
+      <properties>
+        <testSourceLocation>src/e2e-test/java</testSourceLocation>
+        <TEST_RUNNER>TestRunner.java</TEST_RUNNER>
+      </properties>
+      <build>
+        <testResources>
+          <testResource>
+            <directory>src/e2e-test/resources</directory>
+          </testResource>
+        </testResources>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-surefire-plugin</artifactId>
+            <version>2.18.1</version>
+            <configuration>
+              <skipTests>true</skipTests>
+            </configuration>
+          </plugin>
+
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-failsafe-plugin</artifactId>
+            <version>3.0.0-M5</version>
+            <configuration>
+              <includes>
+                <include>${TEST_RUNNER}</include>
+              </includes>
+              <!-- Start of configuration to run TestRunners in parallel -->
+              <parallel>classes</parallel> <!-- Run TestRunner classes in parallel -->
+              <threadCount>2</threadCount> <!-- Number of classes to run in parallel -->
+              <forkCount>2</forkCount> <!-- Number of JVM processes -->
+              <reuseForks>true</reuseForks>
+              <!-- End of configuration to run TestRunners in parallel -->
+              <environmentVariables>
+                <GOOGLE_APPLICATION_CREDENTIALS>
+                  ${GOOGLE_APPLICATION_CREDENTIALS}
+                </GOOGLE_APPLICATION_CREDENTIALS>
+                <SERVICE_ACCOUNT_TYPE>
+                  ${SERVICE_ACCOUNT_TYPE}
+                </SERVICE_ACCOUNT_TYPE>
+                <SERVICE_ACCOUNT_FILE_PATH>
+                  ${SERVICE_ACCOUNT_FILE_PATH}
+                </SERVICE_ACCOUNT_FILE_PATH>
+                <SERVICE_ACCOUNT_JSON>
+                  ${SERVICE_ACCOUNT_JSON}
+                </SERVICE_ACCOUNT_JSON>
+              </environmentVariables>
+            </configuration>
+            <executions>
+              <execution>
+                <goals>
+                  <goal>integration-test</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+
+          <plugin>
+            <groupId>net.masterthought</groupId>
+            <artifactId>maven-cucumber-reporting</artifactId>
+            <version>5.5.0</version>
+
+            <executions>
+              <execution>
+                <id>execution</id>
+                <phase>verify</phase>
+                <goals>
+                  <goal>generate</goal>
+                </goals>
+                <configuration>
+                  <projectName>Cucumber Reports</projectName> <!-- Replace with project name -->
+                  <outputDirectory>target/cucumber-reports/advanced-reports</outputDirectory>
+                  <buildNumber>1</buildNumber>
+                  <skip>false</skip>
+                  <inputDirectory>${project.build.directory}/cucumber-reports</inputDirectory>
+                  <jsonFiles> <!-- supports wildcard or name pattern -->
+                    <param>**/*.json</param>
+                  </jsonFiles>
+                  <!-- optional, defaults to outputDirectory if not specified -->
+                  <classificationDirectory>${project.build.directory}/cucumber-reports</classificationDirectory>
+                  <checkBuildResult>true</checkBuildResult>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+
+      <dependencies>
+        <dependency>
+          <groupId>io.cdap.tests.e2e</groupId>
+          <artifactId>cdap-e2e-framework</artifactId>
+          <version>0.0.1-SNAPSHOT</version>
+          <scope>test</scope>
+        </dependency>
+        <dependency>
+          <groupId>ch.qos.logback</groupId>
+          <artifactId>logback-classic</artifactId>
+          <version>1.2.8</version>
+          <scope>runtime</scope>
+        </dependency>
+        <dependency>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+          <version>27.0.1-jre</version>
+        </dependency>
+        <dependency>
+          <groupId>io.cdap.cdap</groupId>
+          <artifactId>cdap-unit-test</artifactId>
+          <version>${cdap.version}</version>
+          <scope>test</scope>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.hive</groupId>
+              <artifactId>hive-exec</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>io.cdap.cdap</groupId>
+              <artifactId>cdap-explore-jdbc</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
+      </dependencies>
+    </profile>
+  </profiles>
+</project>
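
Note the indirection: testSourceLocation defaults to ${project.basedir}/src/test/java/ and is overridden to src/e2e-test/java by the e2e-tests profile, so the single <testSourceDirectory> serves both the unit tests and the e2e tests. Within the profile, surefire is skipped and failsafe runs only the class named by ${TEST_RUNNER} during integration-test; the Cucumber HTML report is generated at verify. A typical invocation would be along these lines (the credential path is a placeholder; values can also come from -D properties, as shown):

    mvn clean verify -P e2e-tests -DGOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json

The TestRunner class itself is not part of this excerpt. A minimal sketch of what such a runner could look like, assuming the standard Cucumber JUnit 4 runner commonly used with cdap-e2e-framework; the package name, glue package, and report path below are illustrative assumptions, not contents of this commit:

    // Hypothetical runner; package and glue names are assumptions, not from this commit.
    package io.cdap.plugin.recordsplitter.runners;

    import io.cucumber.junit.Cucumber;
    import io.cucumber.junit.CucumberOptions;
    import org.junit.runner.RunWith;

    /**
     * Executes the feature files tagged @RecordSplitter and writes the Cucumber
     * JSON report under target/cucumber-reports, where the maven-cucumber-reporting
     * plugin configured above looks for its input.
     */
    @RunWith(Cucumber.class)
    @CucumberOptions(
        features = {"src/e2e-test/features"},
        glue = {"stepsdesign"},
        tags = "@RecordSplitter",
        plugin = {"json:target/cucumber-reports/cucumber-record-splitter.json"}
    )
    public class TestRunner {
    }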
Lines changed: 160 additions & 0 deletions
@@ -0,0 +1,160 @@
@RecordSplitter
Feature: RecordSplitter transform - Verify BigQuery source data transfer using RecordSplitter transformation

  @BQ_SOURCE_DELIMITED_TEST @BQ_SINK_TEST
  Scenario: To verify data is getting transferred successfully from BigQuery to the BigQuery sink plugin with Record Splitter
    Given Open Datafusion Project to configure pipeline
    When Select plugin: "BigQuery" from the plugins list as: "Source"
    When Expand Plugin group in the LHS plugins list: "Transform"
    When Select plugin: "Record Splitter" from the plugins list as: "Transform"
    Then Connect plugins: "BigQuery" and "RecordSplitter" to establish connection
    When Expand Plugin group in the LHS plugins list: "Sink"
    When Select plugin: "BigQuery" from the plugins list as: "Sink"
    Then Connect plugins: "RecordSplitter" and "BigQuery2" to establish connection
    Then Navigate to the properties page of plugin: "BigQuery"
    Then Replace input plugin property: "project" with value: "projectId"
    Then Enter input plugin property: "datasetProject" with value: "projectId"
    Then Override Service account details if set in environment variables
    Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
    Then Enter input plugin property: "dataset" with value: "dataset"
    Then Enter input plugin property: "table" with value: "bqSourceTable"
    Then Click on the Get Schema button
    Then Validate "BigQuery" plugin properties
    Then Close the Plugin Properties page
    Then Navigate to the properties page of plugin: "RecordSplitter"
    Then Enter input plugin property: "fieldToSplit" with value: "recordSplitterFieldToSplit"
    Then Enter input plugin property: "delimiter" with value: "recordSplitterSemiColonDelimiter"
    Then Enter input plugin property: "outputField" with value: "recordSplitterOutputField"
    Then Select Record Splitter plugin output schema action: "clear"
    Then Enter Record Splitter plugin outputSchema "recordSplitterValidOutputSchema"
    Then Validate "RecordSplitter" plugin properties
    Then Close the Plugin Properties page
    Then Navigate to the properties page of plugin: "BigQuery2"
    Then Replace input plugin property: "project" with value: "projectId"
    Then Enter input plugin property: "datasetProject" with value: "projectId"
    Then Override Service account details if set in environment variables
    Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
    Then Enter input plugin property: "dataset" with value: "dataset"
    Then Enter input plugin property: "table" with value: "bqTargetTable"
    Then Click plugin property: "truncateTable"
    Then Click plugin property: "updateTableSchema"
    Then Validate "BigQuery2" plugin properties
    Then Close the Plugin Properties page
    Then Save the pipeline
    Then Preview and run the pipeline
    Then Wait till pipeline preview is in running state
    Then Open and capture pipeline preview logs
    Then Verify the preview run status of pipeline in the logs is "succeeded"
    Then Close the pipeline logs
    Then Close the preview
    Then Deploy the pipeline
    Then Run the Pipeline in Runtime
    Then Wait till pipeline is in running state
    Then Open and capture logs
    Then Verify the pipeline status is "Succeeded"
    Then Close the pipeline logs
    Then Validate OUT record count is equal to records transferred to target BigQuery table

  @BQ_SOURCE_DELIMITED_TEST @FILE_SINK_TEST
  Scenario: To verify data is getting transferred successfully from BigQuery to the File sink plugin using a comma delimiter
    Given Open Datafusion Project to configure pipeline
    When Select plugin: "BigQuery" from the plugins list as: "Source"
    When Expand Plugin group in the LHS plugins list: "Transform"
    When Select plugin: "Record Splitter" from the plugins list as: "Transform"
    Then Connect plugins: "BigQuery" and "RecordSplitter" to establish connection
    When Expand Plugin group in the LHS plugins list: "Sink"
    When Select plugin: "File" from the plugins list as: "Sink"
    Then Connect plugins: "RecordSplitter" and "File" to establish connection
    Then Navigate to the properties page of plugin: "BigQuery"
    Then Replace input plugin property: "project" with value: "projectId"
    Then Enter input plugin property: "datasetProject" with value: "projectId"
    Then Override Service account details if set in environment variables
    Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
    Then Enter input plugin property: "dataset" with value: "dataset"
    Then Enter input plugin property: "table" with value: "bqSourceTable"
    Then Click on the Get Schema button
    Then Capture the generated Output Schema
    Then Validate "BigQuery" plugin properties
    Then Close the Plugin Properties page
    Then Navigate to the properties page of plugin: "RecordSplitter"
    Then Enter input plugin property: "fieldToSplit" with value: "recordSplitterFieldToSplit"
    Then Enter input plugin property: "delimiter" with value: "recordSplitterCommaDelimiter"
    Then Enter input plugin property: "outputField" with value: "recordSplitterOutputField"
    Then Select Record Splitter plugin output schema action: "clear"
    Then Enter Record Splitter plugin outputSchema "recordSplitterValidOutputSchema"
    Then Validate "RecordSplitter" plugin properties
    Then Close the Plugin Properties page
    Then Navigate to the properties page of plugin: "File"
    Then Enter input plugin property: "referenceName" with value: "FileReferenceName"
    Then Enter input plugin property: "path" with value: "filePluginOutputFolder"
    Then Replace input plugin property: "pathSuffix" with value: "yyyy-MM-dd-HH-mm-ss"
    Then Select dropdown plugin property: "format" with option value: "tsv"
    Then Click plugin property: "writeHeader"
    Then Validate "File" plugin properties
    Then Close the Plugin Properties page
    Then Save the pipeline
    Then Preview and run the pipeline
    Then Wait till pipeline preview is in running state
    Then Open and capture pipeline preview logs
    Then Verify the preview run status of pipeline in the logs is "succeeded"
    Then Close the pipeline logs
    Then Close the preview
    Then Deploy the pipeline
    Then Run the Pipeline in Runtime
    Then Wait till pipeline is in running state
    Then Open and capture logs
    Then Verify the pipeline status is "Succeeded"
    Then Close the pipeline logs
    Then Validate output records in output folder path "filePluginOutputFolder" is equal to expected output file "recordSplitterCommaOutputFile"

  @BQ_SOURCE_DELIMITED_TEST @FILE_SINK_TEST
  Scenario: To verify data is getting transferred successfully from BigQuery to the File sink plugin using an at-the-rate (@) delimiter
    Given Open Datafusion Project to configure pipeline
    When Select plugin: "BigQuery" from the plugins list as: "Source"
    When Expand Plugin group in the LHS plugins list: "Transform"
    When Select plugin: "Record Splitter" from the plugins list as: "Transform"
    Then Connect plugins: "BigQuery" and "RecordSplitter" to establish connection
    When Expand Plugin group in the LHS plugins list: "Sink"
    When Select plugin: "File" from the plugins list as: "Sink"
    Then Connect plugins: "RecordSplitter" and "File" to establish connection
    Then Navigate to the properties page of plugin: "BigQuery"
    Then Replace input plugin property: "project" with value: "projectId"
    Then Enter input plugin property: "datasetProject" with value: "projectId"
    Then Override Service account details if set in environment variables
    Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
    Then Enter input plugin property: "dataset" with value: "dataset"
    Then Enter input plugin property: "table" with value: "bqSourceTable"
    Then Click on the Get Schema button
    Then Capture the generated Output Schema
    Then Validate "BigQuery" plugin properties
    Then Close the Plugin Properties page
    Then Navigate to the properties page of plugin: "RecordSplitter"
    Then Enter input plugin property: "fieldToSplit" with value: "recordSplitterFieldToSplit"
    Then Enter input plugin property: "delimiter" with value: "recordSplitterAtTheRateDelimiter"
    Then Enter input plugin property: "outputField" with value: "recordSplitterOutputField"
    Then Select Record Splitter plugin output schema action: "clear"
    Then Enter Record Splitter plugin outputSchema "recordSplitterValidOutputSchema"
    Then Validate "RecordSplitter" plugin properties
    Then Close the Plugin Properties page
    Then Navigate to the properties page of plugin: "File"
    Then Enter input plugin property: "referenceName" with value: "FileReferenceName"
    Then Enter input plugin property: "path" with value: "filePluginOutputFolder"
    Then Replace input plugin property: "pathSuffix" with value: "yyyy-MM-dd-HH-mm-ss"
    Then Select dropdown plugin property: "format" with option value: "tsv"
    Then Click plugin property: "writeHeader"
    Then Validate "File" plugin properties
    Then Close the Plugin Properties page
    Then Save the pipeline
    Then Preview and run the pipeline
    Then Wait till pipeline preview is in running state
    Then Open and capture pipeline preview logs
    Then Verify the preview run status of pipeline in the logs is "succeeded"
    Then Close the pipeline logs
    Then Close the preview
    Then Deploy the pipeline
    Then Run the Pipeline in Runtime
    Then Wait till pipeline is in running state
    Then Open and capture logs
    Then Verify the pipeline status is "Succeeded"
    Then Close the pipeline logs
    Then Validate output records in output folder path "filePluginOutputFolder" is equal to expected output file "recordSplitterAtTheRateOutputFile"
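
The quoted values in these steps (projectId, dataset, recordSplitterCommaDelimiter, recordSplitterValidOutputSchema, and so on) are property keys rather than literals; cdap-e2e-framework resolves them from the test resources under src/e2e-test/resources, which the e2e-tests profile registers as a test resource directory (conventionally a pluginParameters.properties file). A rough sketch of what the corresponding entries might look like; the file name and every value below are illustrative assumptions, not contents of this commit:

    # pluginParameters.properties (hypothetical example values)
    projectId=my-gcp-project
    dataset=e2e_test_dataset
    BQReferenceName=BQReference
    recordSplitterFieldToSplit=message
    recordSplitterOutputField=part
    recordSplitterSemiColonDelimiter=;
    recordSplitterCommaDelimiter=,
    recordSplitterAtTheRateDelimiter=@
    filePluginOutputFolder=file:///tmp/record-splitter-output

Tag-driven hooks such as @BQ_SOURCE_DELIMITED_TEST and @FILE_SINK_TEST typically create and tear down the BigQuery source table and sink locations at runtime, which is why values like bqSourceTable need not be fixed ahead of time.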
