<?xml version='1.0' encoding='UTF-8'?>
<project>
<actions/>
<description>Weekly Apache Spark build from branch source on a remote bare-metal POWER machine, triggered every Monday at 08:00.</description>
<keepDependencies>false</keepDependencies>
<properties/>
<scm class="hudson.scm.NullSCM"/>
<canRoam>true</canRoam>
<disabled>false</disabled>
<blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
<triggers>
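<!-- Jenkins cron: 0 8 * * 1 = 08:00 every Monday -->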
<hudson.triggers.TimerTrigger>
<spec>0 8 * * 1
</spec>
</hudson.triggers.TimerTrigger>
</triggers>
<concurrentBuild>false</concurrentBuild>
<builders>
<hudson.tasks.Shell>
<command>#!/bin/bash
workDir=/root/WeeklyValidation
IPbaremetal=10.77.67.159
userName=$(grep -Po '(?<=userName=).*' ${workDir}/baremetalMachines/${IPbaremetal})
passWord=$(grep -Po '(?<=passWord=).*' ${workDir}/baremetalMachines/${IPbaremetal})
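# The per-machine file ${workDir}/baremetalMachines/${IPbaremetal} is read via
# PCRE lookbehinds and is assumed to hold one key=value pair per line, e.g.
# (hypothetical values):
#   userName=builder
#   passWord=********
#   branchClone=2.4
#   buildWithHive=TRUE
#   hadoopVer=2.7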
ssh ${userName}@${IPbaremetal} /bin/bash <<'EOF'
echo "These commands will be run on: $( uname -a )"
echo "They are executed by: $( whoami )"
cd WeeklyValidation
workDirR=$(pwd)
export SNAPPY_HOME=/usr/lib
export LEVELDB_HOME=${workDirR}/leveldb
export LEVELDBJNI_HOME=${workDirR}/leveldbjni
export LIBRARY_PATH=${SNAPPY_HOME}
export C_INCLUDE_PATH=${LIBRARY_PATH}
export CPLUS_INCLUDE_PATH=${LIBRARY_PATH}
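# SNAPPY/LEVELDB/LEVELDBJNI homes and the include/library paths are exported so
# native dependencies resolve during the build; presumably leveldb and
# leveldbjni were built locally under ${workDirR}.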
IPbaremetalR=10.77.67.159
cd baremetalMachines/
FunctionalTests=$(grep -Po '(?<=FunctionalTests=).*' ${IPbaremetalR})
PythonTests=$(grep -Po '(?<=PythonTests=).*' ${IPbaremetalR})
RTests=$(grep -Po '(?<=RTests=).*' ${IPbaremetalR})
jdk_val=$(grep -Po '(?<=JDK_VAL=).*' ${IPbaremetalR})
branchClone=$(grep -Po '(?<=branchClone=).*' ${IPbaremetalR})
hiveBuild=$(grep -Po '(?<=buildWithHive=).*' ${IPbaremetalR})
hadoopVer=$(grep -Po '(?<=hadoopVer=).*' ${IPbaremetalR})
cd ${workDirR}
if [ "$hiveBuild" == "TRUE" ]
then
hiveFlag=with
elif [ "$hiveBuild" == "FALSE" ]
then
hiveFlag=without
fi
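# hiveFlag ("with"/"without") becomes part of the workspace directory name below.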
echo -en 'Creating workspace directories for jobs\n'
mkdir -p ${workDirR}/workspace/${IPbaremetalR}-SparkBranch-${branchClone}-Hadoop-${hadoopVer}-${hiveFlag}-Hive-${jdk_val}-WeeklyBuild
cd ${workDirR}/workspace/${IPbaremetalR}-SparkBranch-${branchClone}-Hadoop-${hadoopVer}-${hiveFlag}-Hive-${jdk_val}-WeeklyBuild
rm -rf spark
git clone --recursive --depth 1 https://github.com/apache/spark.git -b branch-${branchClone}
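# --depth 1 keeps the clone shallow; branch-${branchClone} follows Apache
# Spark's release-branch naming (e.g. branch-2.4).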
cd spark
if [ "${jdk_val}" = "OPENJDK" ]
then
# The ppc64el path below assumes an IBM POWER (little-endian) Ubuntu host.
echo -en "Setting OpenJDK path and JAVA_HOME\n"
if [ "$(. /etc/os-release; echo $NAME)" = "Ubuntu" ]; then
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-ppc64el
else
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk
fi
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH
elif [ "${jdk_val}" = "IBMJDK" ]
then
export JAVA_HOME=$(grep -Po '(?<=USER_INSTALL_DIR=).*' ${workDirR}/installer.properties)
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH
fi
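# Sanity-check which JDK ended up on PATH.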
java -version
echo "hiveBuild=${hiveBuild}"
if [ "$hiveBuild" == "TRUE" ]
then
echo -en "Building with Hive and JDBC support\n"
#build/mvn -Pyarn -Phadoop-${hadoopVer} -Psparkr -Dhadoop.version=${hadoopVer}.0 -Phive -Phive-thriftserver -DskipTests clean package
./dev/make-distribution.sh --name custom-${branchClone}-spark --tgz -Psparkr -Phadoop-${hadoopVer} -Phive -Phive-thriftserver -Pyarn
elif [ "$hiveBuild" == "FALSE" ]
then
echo -en "Building without Hive and JDBC support\n"
#build/mvn -Pyarn -Phadoop-${hadoopVer} -Psparkr -Dhadoop.version=${hadoopVer}.0 -DskipTests clean package
./dev/make-distribution.sh --name custom-${branchClone}-spark --tgz -Psparkr -Phadoop-${hadoopVer} -Pyarn
fi
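# On success, make-distribution.sh should leave a spark-<version>-bin-custom-${branchClone}-spark.tgz
# tarball in the spark/ checkout, named from the --name flag above.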
EOF
#export '_JAVA_OPTIONS=-XX:-UseGCOverheadLimit -Xms512m -Xmx2048m'
#echo "==============Clone Spark existing release======================"
# Build Spark
# This enables yarn and hadoop profiles.
# We do not specify a yarn.version and assume it is same as hadoop.version
#
# The hadoop.version must be compatible with the selected hadoop profile
# (e.g. -Phadoop-2.7 pairs with hadoop.version=2.7.0, as in the test line below).
#
# Run tests
#build/mvn --fail-never -Pyarn -Phadoop-2.7 -Dhadoop.version=2.7.0 test
#python/run-tests
#R/run-tests.sh
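# Note: FunctionalTests/PythonTests/RTests are read from the machine file above
# but not currently consumed; presumably they were meant to gate these test runs.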
# Back up the build log with a timestamp
now=$(date +"%d-%m-%Y_%H:%M:%S")
mkdir -p ${workDir}/logs/${JOB_NAME}
cp /var/lib/jenkins/jobs/${JOB_NAME}/builds/${BUILD_NUMBER}/log ${workDir}/logs/${JOB_NAME}/${JOB_NAME}_${now}.log
</command>
</hudson.tasks.Shell>
</builders>
<publishers/>
<buildWrappers/>
</project>