Add dependencies locally

This commit is contained in:
Ahrimdon
2024-02-27 03:09:30 -05:00
parent 1679ef60cc
commit 70e8a8502b
5698 changed files with 2770161 additions and 12 deletions

View File

@ -0,0 +1,143 @@
# This Dockerfile specifies the recipe for creating an image for the tests
# to run in.
#
# We install as many test dependencies here as we can, because these setup
# steps can be cached. They do *not* run every time we run the build.
# The Docker image is only rebuilt when the Dockerfile (i.e. this file)
# changes.
# Base Dockerfile for gRPC dev images
FROM 32bit/debian:latest
# Apt source for php
RUN echo "deb http://ppa.launchpad.net/ondrej/php/ubuntu trusty main" | tee /etc/apt/sources.list.d/various-php.list && \
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F4FCBB07
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get clean && apt-get update && apt-get install -y --force-yes \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
time \
wget \
unzip \
# -- For python --
python-setuptools \
python-pip \
python-dev \
# -- For C++ benchmarks --
cmake \
# -- For PHP --
php5.5 \
php5.5-dev \
php5.5-xml \
php5.6 \
php5.6-dev \
php5.6-xml \
php7.0 \
php7.0-dev \
php7.0-xml \
phpunit \
valgrind \
libxml2-dev \
&& apt-get clean
##################
# PHP dependencies.
RUN wget http://am1.php.net/get/php-5.5.38.tar.bz2/from/this/mirror
RUN mv mirror php-5.5.38.tar.bz2
RUN tar -xvf php-5.5.38.tar.bz2
RUN cd php-5.5.38 && ./configure --enable-maintainer-zts --prefix=/usr/local/php-5.5-zts && \
make && make install && make clean && cd ..
RUN cd php-5.5.38 && make clean && ./configure --enable-bcmath --prefix=/usr/local/php-5.5 && \
make && make install && make clean && cd ..
RUN wget http://am1.php.net/get/php-5.6.30.tar.bz2/from/this/mirror
RUN mv mirror php-5.6.30.tar.bz2
RUN tar -xvf php-5.6.30.tar.bz2
RUN cd php-5.6.30 && ./configure --enable-maintainer-zts --prefix=/usr/local/php-5.6-zts && \
make && make install && cd ..
RUN cd php-5.6.30 && make clean && ./configure --enable-bcmath --prefix=/usr/local/php-5.6 && \
make && make install && cd ..
RUN wget http://am1.php.net/get/php-7.0.18.tar.bz2/from/this/mirror
RUN mv mirror php-7.0.18.tar.bz2
RUN tar -xvf php-7.0.18.tar.bz2
RUN cd php-7.0.18 && ./configure --enable-maintainer-zts --prefix=/usr/local/php-7.0-zts && \
make && make install && cd ..
RUN cd php-7.0.18 && make clean && ./configure --enable-bcmath --prefix=/usr/local/php-7.0 && \
make && make install && cd ..
RUN wget http://am1.php.net/get/php-7.1.4.tar.bz2/from/this/mirror
RUN mv mirror php-7.1.4.tar.bz2
RUN tar -xvf php-7.1.4.tar.bz2
RUN cd php-7.1.4 && ./configure --enable-maintainer-zts --prefix=/usr/local/php-7.1-zts && \
make && make install && cd ..
RUN cd php-7.1.4 && make clean && ./configure --enable-bcmath --prefix=/usr/local/php-7.1 && \
make && make install && cd ..
RUN php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
RUN php composer-setup.php
RUN mv composer.phar /usr/bin/composer
RUN php -r "unlink('composer-setup.php');"
RUN composer config -g -- disable-tls true
RUN composer config -g -- secure-http false
RUN cd /tmp && \
git clone https://github.com/google/protobuf.git && \
cd protobuf/php && \
git reset --hard 49b44bff2b6257a119f9c6a342d6151c736586b8 && \
ln -sfn /usr/local/php-5.5/bin/php /usr/bin/php && \
ln -sfn /usr/local/php-5.5/bin/php-config /usr/bin/php-config && \
ln -sfn /usr/local/php-5.5/bin/phpize /usr/bin/phpize && \
composer install && \
mv vendor /usr/local/vendor-5.5 && \
ln -sfn /usr/local/php-5.6/bin/php /usr/bin/php && \
ln -sfn /usr/local/php-5.6/bin/php-config /usr/bin/php-config && \
ln -sfn /usr/local/php-5.6/bin/phpize /usr/bin/phpize && \
composer install && \
mv vendor /usr/local/vendor-5.6 && \
ln -sfn /usr/local/php-7.0/bin/php /usr/bin/php && \
ln -sfn /usr/local/php-7.0/bin/php-config /usr/bin/php-config && \
ln -sfn /usr/local/php-7.0/bin/phpize /usr/bin/phpize && \
composer install && \
mv vendor /usr/local/vendor-7.0 && \
ln -sfn /usr/local/php-7.1/bin/php /usr/bin/php && \
ln -sfn /usr/local/php-7.1/bin/php-config /usr/bin/php-config && \
ln -sfn /usr/local/php-7.1/bin/phpize /usr/bin/phpize && \
composer install && \
mv vendor /usr/local/vendor-7.1
##################
# Python dependencies
# These packages exist in apt-get, but their versions are too old, so we have
# to get updates from pip.
RUN pip install pip --upgrade
RUN pip install virtualenv tox yattag
##################
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
RUN ln -s /usr/bin/ccache /usr/local/bin/cc
RUN ln -s /usr/bin/ccache /usr/local/bin/c++
RUN ln -s /usr/bin/ccache /usr/local/bin/clang
RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
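# With these symlinks in place, builds that invoke gcc/g++/clang via
# /usr/local/bin (which comes before /usr/bin on the image's default PATH) are
# transparently routed through ccache; the cache location is controlled by the
# CCACHE_DIR environment variable supplied when the container is run.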
# Define the default command.
CMD ["bash"]

View File

@ -0,0 +1,18 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request 32" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERHUB_ORGANIZATION=protobuftesting
export DOCKERFILE_DIR=kokoro/linux/dockerfile/test/php_32bit
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="php_all_32"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/32-bit/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/32-bit/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,225 @@
# This Dockerfile specifies the recipe for creating an image for the tests
# to run in.
#
# We install as many test dependencies here as we can, because these setup
# steps can be cached. They do *not* run every time we run the build.
# The Docker image is only rebuilt when the Dockerfile (i.e. this file)
# changes.
# Base Dockerfile for gRPC dev images
FROM debian:latest
# Apt source for old Python versions.
RUN echo 'deb http://ppa.launchpad.net/fkrull/deadsnakes/ubuntu trusty main' > /etc/apt/sources.list.d/deadsnakes.list && \
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys DB82666C
# Apt source for Oracle Java.
RUN echo 'deb http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main' > /etc/apt/sources.list.d/webupd8team-java-trusty.list && \
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys EEA14886 && \
echo "oracle-java7-installer shared/accepted-oracle-license-v1-1 select true" | debconf-set-selections
# Apt source for Mono
RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list && \
echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list && \
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
# Apt source for php
RUN echo "deb http://ppa.launchpad.net/ondrej/php/ubuntu trusty main" | tee /etc/apt/sources.list.d/various-php.list && \
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F4FCBB07
# Install dotnet SDK based on https://www.microsoft.com/net/core#debian
# (Ubuntu instructions need apt to support https)
RUN apt-get update && apt-get install -y --force-yes curl libunwind8 gettext && \
curl -sSL -o dotnet.tar.gz https://go.microsoft.com/fwlink/?LinkID=847105 && \
mkdir -p /opt/dotnet && tar zxf dotnet.tar.gz -C /opt/dotnet && \
ln -s /opt/dotnet/dotnet /usr/local/bin
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get clean && apt-get update && apt-get install -y --force-yes \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
time \
wget \
# -- For csharp --
mono-devel \
referenceassemblies-pcl \
nunit \
# -- For all Java builds -- \
maven \
# -- For java_jdk7 -- \
openjdk-7-jdk \
# -- For java_oracle7 -- \
oracle-java7-installer \
# -- For python / python_cpp -- \
python-setuptools \
python-pip \
python-dev \
python2.6-dev \
python3.3-dev \
python3.4-dev \
# -- For C++ benchmarks --
cmake \
# -- For PHP --
php5.6 \
php5.6-dev \
php5.6-xml \
php7.0 \
php7.0-dev \
php7.0-xml \
phpunit \
valgrind \
libxml2-dev \
&& apt-get clean
##################
# C# dependencies
RUN wget www.nuget.org/NuGet.exe -O /usr/local/bin/nuget.exe
##################
# Python dependencies
# These packages exist in apt-get, but their versions are too old, so we have
# to get updates from pip.
RUN pip install pip --upgrade
RUN pip install virtualenv tox yattag
##################
# Java dependencies
# This step requires compiling protoc. :(
ENV MAVEN_REPO /var/maven_local_repository
ENV MVN mvn --batch-mode
RUN cd /tmp && \
git clone https://github.com/google/protobuf.git && \
cd protobuf && \
git reset --hard 129a6e2aca95dcfb6c3e717d7b9cca1f104fde39 && \
./autogen.sh && \
./configure && \
make -j4 && \
cd java && \
$MVN install dependency:go-offline -Dmaven.repo.local=$MAVEN_REPO && \
cd ../javanano && \
$MVN install dependency:go-offline -Dmaven.repo.local=$MAVEN_REPO
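# Note: "dependency:go-offline" resolves and downloads the Java build's
# dependencies and plugins into $MAVEN_REPO ahead of time, so later Java builds
# inside this image can run without re-fetching them from the network.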
##################
# PHP dependencies.
RUN wget http://am1.php.net/get/php-5.5.38.tar.bz2/from/this/mirror
RUN mv mirror php-5.5.38.tar.bz2
RUN tar -xvf php-5.5.38.tar.bz2
RUN cd php-5.5.38 && ./configure --enable-maintainer-zts --prefix=/usr/local/php-5.5-zts && \
make && make install && cd ..
RUN cd php-5.5.38 && make clean && ./configure --enable-bcmath --prefix=/usr/local/php-5.5 && \
make && make install && cd ..
RUN wget http://am1.php.net/get/php-5.6.30.tar.bz2/from/this/mirror
RUN mv mirror php-5.6.30.tar.bz2
RUN tar -xvf php-5.6.30.tar.bz2
RUN cd php-5.6.30 && ./configure --enable-maintainer-zts --prefix=/usr/local/php-5.6-zts && \
make && make install && cd ..
RUN cd php-5.6.30 && make clean && ./configure --enable-bcmath --prefix=/usr/local/php-5.6 && \
make && make install && cd ..
RUN wget http://am1.php.net/get/php-7.0.18.tar.bz2/from/this/mirror
RUN mv mirror php-7.0.18.tar.bz2
RUN tar -xvf php-7.0.18.tar.bz2
RUN cd php-7.0.18 && ./configure --enable-maintainer-zts --prefix=/usr/local/php-7.0-zts && \
make && make install && cd ..
RUN cd php-7.0.18 && make clean && ./configure --enable-bcmath --prefix=/usr/local/php-7.0 && \
make && make install && cd ..
RUN wget http://am1.php.net/get/php-7.1.4.tar.bz2/from/this/mirror
RUN mv mirror php-7.1.4.tar.bz2
RUN tar -xvf php-7.1.4.tar.bz2
RUN cd php-7.1.4 && ./configure --enable-maintainer-zts --prefix=/usr/local/php-7.1-zts && \
make && make install && cd ..
RUN cd php-7.1.4 && make clean && ./configure --enable-bcmath --prefix=/usr/local/php-7.1 && \
make && make install && cd ..
RUN php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
RUN php composer-setup.php
RUN mv composer.phar /usr/bin/composer
RUN php -r "unlink('composer-setup.php');"
RUN composer config -g -- disable-tls true
RUN composer config -g -- secure-http false
RUN cd /tmp && \
rm -rf protobuf && \
git clone https://github.com/google/protobuf.git && \
cd protobuf && \
git reset --hard 49b44bff2b6257a119f9c6a342d6151c736586b8 && \
cd php && \
ln -sfn /usr/local/php-5.5/bin/php /usr/bin/php && \
ln -sfn /usr/local/php-5.5/bin/php-config /usr/bin/php-config && \
ln -sfn /usr/local/php-5.5/bin/phpize /usr/bin/phpize && \
composer install && \
mv vendor /usr/local/vendor-5.5 && \
ln -sfn /usr/local/php-5.6/bin/php /usr/bin/php && \
ln -sfn /usr/local/php-5.6/bin/php-config /usr/bin/php-config && \
ln -sfn /usr/local/php-5.6/bin/phpize /usr/bin/phpize && \
composer install && \
mv vendor /usr/local/vendor-5.6 && \
ln -sfn /usr/local/php-7.0/bin/php /usr/bin/php && \
ln -sfn /usr/local/php-7.0/bin/php-config /usr/bin/php-config && \
ln -sfn /usr/local/php-7.0/bin/phpize /usr/bin/phpize && \
composer install && \
mv vendor /usr/local/vendor-7.0 && \
ln -sfn /usr/local/php-7.1/bin/php /usr/bin/php && \
ln -sfn /usr/local/php-7.1/bin/php-config /usr/bin/php-config && \
ln -sfn /usr/local/php-7.1/bin/phpize /usr/bin/phpize && \
composer install && \
mv vendor /usr/local/vendor-7.1
##################
# Go dependencies.
RUN apt-get install -y \
# -- For go -- \
golang
##################
# Javascript dependencies.
RUN apt-get install -y \
# -- For javascript -- \
npm
##################
# Python 3.5 3.6 dependencies.
RUN apt-get clean && apt-get update && apt-get install -y --force-yes \
python3.5-dev \
python3.6-dev \
&& apt-get clean
# On Debian/Ubuntu, the nodejs binary is named 'nodejs' because the name 'node'
# is taken by another legacy binary. We don't have that legacy binary and
# npm expects the binary to be named 'node', so we just create a symbolic
# link here.
RUN ln -s `which nodejs` /usr/bin/node
##################
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
RUN ln -s /usr/bin/ccache /usr/local/bin/cc
RUN ln -s /usr/bin/ccache /usr/local/bin/c++
RUN ln -s /usr/bin/ccache /usr/local/bin/clang
RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
# Define the default command.
CMD ["bash"]

View File

@ -0,0 +1,21 @@
#!/bin/bash
#
# Builds protobuf C++ with aarch64 crosscompiler and runs a basic set of tests under an emulator.
# NOTE: This script is expected to run under the dockcross/linux-arm64 docker image.
set -ex
mkdir -p cmake/crossbuild_aarch64
cd cmake/crossbuild_aarch64
# the build commands are expected to run under dockcross docker image
# where the CC, CXX and other toolchain variables already point to the crosscompiler
cmake ..
make -j8
# check that the resulting test binary is indeed an aarch64 ELF
(file ./tests | grep -q "ELF 64-bit LSB executable, ARM aarch64") || (echo "Test binary is not an aarch64 binary"; exit 1)
# run the basic set of C++ tests under QEMU
# there are other tests we could run (e.g. ./lite-test), but this is sufficient as a smoketest
qemu-aarch64 ./tests

View File

@ -0,0 +1,36 @@
#!/bin/bash
set -e
# go to the repo root
cd $(dirname $0)/../../../..
if [[ -t 0 ]]; then
DOCKER_TTY_ARGS="-it"
else
# The input device on kokoro is not a TTY, so -it does not work.
DOCKER_TTY_ARGS=
fi
# Pin the dockcross image since newer versions of the image break the build
PINNED_DOCKCROSS_IMAGE_VERSION=dockcross/linux-arm64:20210625-795dd4d
# Running the dockcross image without any arguments generates a wrapper
# script that can be used to run commands under the dockcross image
# easily.
# See https://github.com/dockcross/dockcross#usage for details
docker run $DOCKER_TTY_ARGS --rm $PINNED_DOCKCROSS_IMAGE_VERSION >dockcross-linux-arm64.sh
chmod +x dockcross-linux-arm64.sh
# the wrapper script has CRLF line endings and bash doesn't like that
# so we change CRLF line endings into LF.
sed -i 's/\r//g' dockcross-linux-arm64.sh
# The dockcross wrapper script runs arbitrary commands under the selected dockcross
# image with the following properties which make its use very convenient:
# * the current working directory is mounted under /work so the container can easily
# access the current workspace
# * the processes in the container run under the same UID and GID as the host process so unlike
# vanilla "docker run" invocations, the workspace doesn't get polluted with files
# owned by root.
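# Illustrative usage (not part of this CI flow): any command can be forwarded to
# the container through the wrapper, e.g.
#   ./dockcross-linux-arm64.sh -- bash -c '$CC --version'
# where $CC is the crosscompiler toolchain variable preconfigured inside the
# dockcross image.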
./dockcross-linux-arm64.sh --image $PINNED_DOCKCROSS_IMAGE_VERSION -- "$@"

View File

@ -0,0 +1,36 @@
#!/bin/bash
set -e
# go to the repo root
cd $(dirname $0)/../../../..
if [[ -t 0 ]]; then
DOCKER_TTY_ARGS="-it"
else
# The input device on kokoro is not a TTY, so -it does not work.
DOCKER_TTY_ARGS=
fi
# Pin the dockcross image since newer versions of the image break the build
PINNED_DOCKCROSS_IMAGE_VERSION=dockcross/manylinux2014-aarch64:20210803-41e5c69
# Running the dockcross image without any arguments generates a wrapper
# script that can be used to run commands under the dockcross image
# easily.
# See https://github.com/dockcross/dockcross#usage for details
docker run $DOCKER_TTY_ARGS --rm $PINNED_DOCKCROSS_IMAGE_VERSION >dockcross-manylinux2014-aarch64.sh
chmod +x dockcross-manylinux2014-aarch64.sh
# the wrapper script has CRLF line endings and bash doesn't like that
# so we change CRLF line endings into LF.
sed -i 's/\r//g' dockcross-manylinux2014-aarch64.sh
# The dockcross wrapper script runs arbitrary commands under the selected dockcross
# image with the following properties which make its use very convenient:
# * the current working directory is mounted under /work so the container can easily
# access the current workspace
# * the processes in the container run under the same UID and GID as the host process so unlike
# vanilla "docker run" invocations, the workspace doesn't get polluted with files
# owned by root.
./dockcross-manylinux2014-aarch64.sh --image $PINNED_DOCKCROSS_IMAGE_VERSION -- "$@"

View File

@ -0,0 +1,19 @@
#!/bin/bash
set -ex
# install the same version of node as in /tests.sh
NODE_VERSION=node-v12.16.3-linux-arm64
NODE_TGZ="$NODE_VERSION.tar.gz"
pushd /tmp
curl -OL https://nodejs.org/dist/v12.16.3/$NODE_TGZ
tar zxvf $NODE_TGZ
export PATH=$PATH:`pwd`/$NODE_VERSION/bin
popd
# go to the repo root
cd $(dirname $0)/../../..
cd js
npm install
npm test

View File

@ -0,0 +1,18 @@
#!/bin/bash
set -ex
# Install composer
curl -sS https://getcomposer.org/installer | php
mkdir -p "$HOME/bin"
mv composer.phar "$HOME/bin/composer"
PATH="$HOME/bin:$PATH"
# go to the repo root
cd $(dirname $0)/../../..
cd php
composer install
composer test
composer test_c

View File

@ -0,0 +1,9 @@
#!/bin/bash
#
# Builds protobuf C++ with aarch64 crosscompiler.
set -ex
./autogen.sh
CXXFLAGS="-fPIC -g -O2" ./configure --host=aarch64
make -j8

View File

@ -0,0 +1,41 @@
#!/bin/bash
#
# Builds protobuf python including the C++ extension with aarch64 crosscompiler.
# The outputs of this script are laid out so that we can later test them under an aarch64 emulator.
# NOTE: This script is expected to run under the dockcross/manylinux2014-aarch64 docker image.
set -ex
PYTHON="/opt/python/cp38-cp38/bin/python"
./autogen.sh
CXXFLAGS="-fPIC -g -O2" ./configure --host=aarch64
make -j8
# create a simple shell wrapper that runs crosscompiled protoc under qemu
echo '#!/bin/bash' >protoc_qemu_wrapper.sh
echo 'exec qemu-aarch64 "../src/protoc" "$@"' >>protoc_qemu_wrapper.sh
chmod ugo+x protoc_qemu_wrapper.sh
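# The wrapper is needed because ../src/protoc is an aarch64 binary and cannot be
# executed directly on the x86_64 build host; every invocation made via $PROTOC
# below therefore goes through the qemu-aarch64 userspace emulator.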
# The PROTOC variable is used by the build_py step that runs under the ./python directory
export PROTOC=../protoc_qemu_wrapper.sh
pushd python
# NOTE: this step will use protoc_qemu_wrapper.sh to generate protobuf files.
${PYTHON} setup.py build_py
# when crosscompiling for aarch64, --plat-name needs to be set explicitly
# to end up with a correctly named wheel file.
# The value should be manylinuxABC_ARCH, and the dockcross docker image
# conveniently provides the value in the AUDITWHEEL_PLAT env variable.
plat_name_flag="--plat-name=$AUDITWHEEL_PLAT"
# override the value of EXT_SUFFIX to make sure the crosscompiled .so files in the wheel have the correct filename suffix
export PROTOCOL_BUFFERS_OVERRIDE_EXT_SUFFIX="$(${PYTHON} -c 'import sysconfig; print(sysconfig.get_config_var("EXT_SUFFIX").replace("-x86_64-linux-gnu.so", "-aarch64-linux-gnu.so"))')"
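# For illustration only (assumed values): a native EXT_SUFFIX such as
# ".cpython-38-x86_64-linux-gnu.so" is rewritten to
# ".cpython-38-aarch64-linux-gnu.so", so the .so files packed into the wheel
# carry the suffix expected on the aarch64 target.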
# Build the python extension inplace to be able to run the python unittests later
${PYTHON} setup.py build_ext --cpp_implementation --compile_static_extension --inplace
# Build the binary wheel (to check it with auditwheel)
${PYTHON} setup.py bdist_wheel --cpp_implementation --compile_static_extension $plat_name_flag

View File

@ -0,0 +1,28 @@
#!/bin/bash
set -ex
# go to the repo root
cd $(dirname $0)/../../..
cd python
PYTHON="/opt/python/cp38-cp38/bin/python"
${PYTHON} -m pip install --user pytest auditwheel
# check that we are really using aarch64 python
(${PYTHON} -c 'import sysconfig; print(sysconfig.get_platform())' | grep -q "linux-aarch64") || (echo "Wrong python platform, needs to be aarch64 python."; exit 1)
# step 1: run all python unittests
# we've built the python extension previously with --inplace option
# so we can just discover all the unittests and run them directly under
# the python/ directory.
LD_LIBRARY_PATH=../src/.libs PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp ${PYTHON} -m pytest google/protobuf
# step 2: run auditwheel show to check that the wheel is manylinux2014 compatible.
# auditwheel needs to run on wheel's target platform (or under an emulator)
${PYTHON} -m auditwheel show dist/protobuf-*-manylinux2014_aarch64.whl
# step 3: smoketest that the wheel can be installed and run a smokecheck
${PYTHON} -m pip install dist/protobuf-*-manylinux2014_aarch64.whl
# when python cpp extension is on, simply importing a message type will trigger loading the cpp extension
PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp ${PYTHON} -c 'import google.protobuf.timestamp_pb2; print("Successfully loaded the python cpp extension!")'

View File

@ -0,0 +1,26 @@
#!/bin/bash
#
# Set up and configure the qemu userspace emulator on the kokoro worker so that we can seamlessly emulate processes running
# inside docker containers.
set -ex
# show pre-existing qemu registration
cat /proc/sys/fs/binfmt_misc/qemu-aarch64
# Kokoro ubuntu1604 workers already have the qemu-user and qemu-user-static packages installed, but it's an old version that:
# * prints warnings about some syscalls (e.g. "qemu: Unsupported syscall: 278")
# * doesn't register with binfmt_misc with the persistent ("F") flag we need (see below)
#
# To overcome the above limitations, we use the https://github.com/multiarch/qemu-user-static
# docker image to provide a new enough version of qemu-user-static and register it with
# the desired binfmt_misc flags. The most important flag we need is "F" (set by "--persistent yes"),
# which allows the qemu-aarch64-static binary to be loaded eagerly at the time of registration with binfmt_misc.
# That way, we can emulate aarch64 binaries running inside docker containers transparently, without needing the emulator
# binary to be accessible from the docker image we're emulating.
# Note that on newer distributions (such as glinux), simply "apt install qemu-user-static" is sufficient
# to install qemu-user-static with the right flags.
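# For reference (output is illustrative and may vary): after re-registration,
# the qemu-aarch64 entry printed at the end of this script should contain a
# "flags:" line that includes "F", confirming the persistent registration.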
docker run --rm --privileged multiarch/qemu-user-static:5.2.0-2 --reset --credential yes --persistent yes
# Print the current qemu registration to make sure everything is set up correctly.
cat /proc/sys/fs/binfmt_misc/qemu-aarch64

View File

@ -0,0 +1,14 @@
#!/bin/bash
#
# Crosscompiles protobuf C++ under dockcross docker image and runs the tests under an emulator.
set -e
# go to the repo root
cd $(dirname $0)/../../..
# Initialize any submodules.
git submodule update --init --recursive
# run the C++ build and test script under dockcross/linux-arm64 image
kokoro/linux/aarch64/dockcross_helpers/run_dockcross_linux_aarch64.sh kokoro/linux/aarch64/cpp_crosscompile_and_run_tests_with_qemu_aarch64.sh

View File

@ -0,0 +1,29 @@
#!/bin/bash
set -ex
# go to the repo root
cd $(dirname $0)/../../..
if [[ -t 0 ]]; then
DOCKER_TTY_ARGS="-it"
else
# The input device on kokoro is not a TTY, so -it does not work.
DOCKER_TTY_ARGS=
fi
# First, build protobuf C# tests under x86_64 docker image
# Tests are built "dotnet publish" because we want all the dependencies to the copied to the destination directory
# (we want to avoid references to ~/.nuget that won't be available in the subsequent docker run)
CSHARP_BUILD_COMMAND="dotnet publish -c Release -f net60 csharp/src/Google.Protobuf.Test/Google.Protobuf.Test.csproj"
docker run $DOCKER_TTY_ARGS --rm --user "$(id -u):$(id -g)" -e "HOME=/home/fake-user" -e "DOTNET_CLI_TELEMETRY_OPTOUT=true" -e "DOTNET_SKIP_FIRST_TIME_EXPERIENCE=true" -v "$(mktemp -d):/home/fake-user" -v "$(pwd)":/work -w /work mcr.microsoft.com/dotnet/sdk:6.0.100-bullseye-slim bash -c "$CSHARP_BUILD_COMMAND"
# Use an actual aarch64 docker image to run protobuf C# tests with an emulator. "dotnet vstest" allows
# running tests from a pre-built project.
# * mount the protobuf root as /work to be able to access the crosscompiled files
# * to avoid running the process inside docker as root (which can pollute the workspace with files owned by root), we force
# running under current user's UID and GID. To be able to do that, we need to provide a home directory for the user
# otherwise the UID would be homeless under the docker container and pip install wouldn't work. For simplicity,
# we just map the user's home to a throwaway temporary directory
CSHARP_TEST_COMMAND="dotnet vstest csharp/src/Google.Protobuf.Test/bin/Release/net60/publish/Google.Protobuf.Test.dll"
docker run $DOCKER_TTY_ARGS --rm --user "$(id -u):$(id -g)" -e "HOME=/home/fake-user" -e "DOTNET_CLI_TELEMETRY_OPTOUT=true" -e "DOTNET_SKIP_FIRST_TIME_EXPERIENCE=true" -v "$(mktemp -d):/home/fake-user" -v "$(pwd)":/work -w /work mcr.microsoft.com/dotnet/sdk:6.0.100-bullseye-slim-arm64v8 bash -c "$CSHARP_TEST_COMMAND"

View File

@ -0,0 +1,32 @@
#!/bin/bash
set -ex
# go to the repo root
cd $(dirname $0)/../../..
if [[ -t 0 ]]; then
DOCKER_TTY_ARGS="-it"
else
# The input device on kokoro is not a TTY, so -it does not work.
DOCKER_TTY_ARGS=
fi
# crosscompile protoc as we will later need it for the java build.
# we build it under the dockcross/manylinux2014-aarch64 image so that the resulting protoc binary is compatible
# with a wide range of linux distros (including any docker images we will use later to build and test java)
kokoro/linux/aarch64/dockcross_helpers/run_dockcross_manylinux2014_aarch64.sh kokoro/linux/aarch64/protoc_crosscompile_aarch64.sh
# the command that will be used to build and test java under an emulator
# * IsValidUtf8Test and DecodeUtf8Test tests are being skipped because they take very long under an emulator.
TEST_JAVA_COMMAND="mvn --batch-mode -DskipTests install && mvn --batch-mode -Dtest='**/*Test, !**/*IsValidUtf8Test, !**/*DecodeUtf8Test' -DfailIfNoTests=false -Dsurefire.failIfNoSpecifiedTests=false surefire:test"
# use an actual aarch64 docker image (with a real aarch64 java and maven) to run build & test protobuf java under an emulator
# * mount the protobuf root as /work to be able to access the crosscompiled files
# * to avoid running the process inside docker as root (which can pollute the workspace with files owned by root), we force
# running under current user's UID and GID. To be able to do that, we need to provide a home directory for the user
# otherwise the UID would be homeless under the docker container and pip install wouldn't work. For simplicity,
# we just map the user's home to a throwaway temporary directory
# * the JAVA_OPTS and MAVEN_CONFIG variables are being set mostly to silence warnings about non-existent home directory
# and to avoid polluting the workspace.
docker run $DOCKER_TTY_ARGS --rm --user "$(id -u):$(id -g)" -e "HOME=/home/fake-user" -e "JAVA_OPTS=-Duser.home=/home/fake-user" -e "MAVEN_CONFIG=/home/fake-user/.m2" -v "$(mktemp -d):/home/fake-user" -v "$(pwd)":/work -w /work arm64v8/maven:3.8-openjdk-11 bash -c "cd java && $TEST_JAVA_COMMAND"

View File

@ -0,0 +1,29 @@
#!/bin/bash
set -ex
# go to the repo root
cd $(dirname $0)/../../..
if [[ -t 0 ]]; then
DOCKER_TTY_ARGS="-it"
else
# The input device on kokoro is not a TTY, so -it does not work.
DOCKER_TTY_ARGS=
fi
# crosscompile protoc as we will later need it for the javascript build.
# we build it under the dockcross/manylinux2014-aarch64 image so that the resulting protoc binary is compatible
# with a wide range of linux distros (including any docker images we will use later to build and test javascript)
kokoro/linux/aarch64/dockcross_helpers/run_dockcross_manylinux2014_aarch64.sh kokoro/linux/aarch64/protoc_crosscompile_aarch64.sh
# use an actual aarch64 docker image (with a real aarch64 nodejs) to run build & test protobuf javascript under an emulator
# * mount the protobuf root as /work to be able to access the crosscompiled files
# * to avoid running the process inside docker as root (which can pollute the workspace with files owned by root), we force
# running under current user's UID and GID. To be able to do that, we need to provide a home directory for the user
# otherwise the UID would be homeless under the docker container and pip install wouldn't work. For simplicity,
# we just map the user's home to a throwaway temporary directory
# Note that the docker image used for running the tests is arm64v8/openjdk, not arm64v8/node.
# This is because some of the node tests require java to be available, and adding a node
# binary distribution into a java image is easier than vice versa.
docker run $DOCKER_TTY_ARGS --rm --user "$(id -u):$(id -g)" -e "HOME=/home/fake-user" -v "$(mktemp -d):/home/fake-user" -v "$(pwd)":/work -w /work arm64v8/openjdk:11-jdk-buster kokoro/linux/aarch64/javascript_build_and_run_tests_with_qemu_aarch64.sh

View File

@ -0,0 +1,31 @@
#!/bin/bash
set -ex
# go to the repo root
cd $(dirname $0)/../../..
# there is no php testing docker image readily available, so we build
# our own. It's an aarch64 image, but that's fine since qemu will
# automatically be used to run the commands in the dockerfile.
docker build -t testimage_protobuf_php_arm64v8 kokoro/linux/aarch64/testimage_protobuf_php_arm64v8
if [[ -t 0 ]]; then
DOCKER_TTY_ARGS="-it"
else
# The input device on kokoro is not a TTY, so -it does not work.
DOCKER_TTY_ARGS=
fi
# crosscompile protoc as we will later need it for the php build.
# we build it under the dockcross/manylinux2014-aarch64 image so that the resulting protoc binary is compatible
# with a wide range of linux distros (including any docker images we will use later to build and test php)
kokoro/linux/aarch64/dockcross_helpers/run_dockcross_manylinux2014_aarch64.sh kokoro/linux/aarch64/protoc_crosscompile_aarch64.sh
# use an actual aarch64 docker image (with a real aarch64 php) to run build & test protobuf php under an emulator
# * mount the protobuf root as /work to be able to access the crosscompiled files
# * to avoid running the process inside docker as root (which can pollute the workspace with files owned by root), we force
# running under current user's UID and GID. To be able to do that, we need to provide a home directory for the user
# otherwise the UID would be homeless under the docker container and pip install wouldn't work. For simplicity,
# we just map the user's home to a throwaway temporary directory
docker run $DOCKER_TTY_ARGS --rm --user "$(id -u):$(id -g)" -e "HOME=/home/fake-user" -v "$(mktemp -d):/home/fake-user" -v "$(pwd)":/work -w /work testimage_protobuf_php_arm64v8 kokoro/linux/aarch64/php_build_and_run_tests_with_qemu_aarch64.sh

View File

@ -0,0 +1,26 @@
#!/bin/bash
set -e
# go to the repo root
cd $(dirname $0)/../../..
if [[ -t 0 ]]; then
DOCKER_TTY_ARGS="-it"
else
# The input device on kokoro is not a TTY, so -it does not work.
DOCKER_TTY_ARGS=
fi
# crosscompile python extension and the binary wheel under dockcross/manylinux2014-aarch64 image
kokoro/linux/aarch64/dockcross_helpers/run_dockcross_manylinux2014_aarch64.sh kokoro/linux/aarch64/python_crosscompile_aarch64.sh
# once crosscompilation is done, use an actual aarch64 docker image (with a real aarch64 python) to run all the tests under an emulator
# * mount the protobuf root as /work to be able to access the crosscompiled files
# * intentionally use a different image than manylinux2014 so that we don't build and test on the same linux distribution
# (manylinux_2_24 is debian-based while manylinux2014 is centos-based)
# * to avoid running the process inside docker as root (which can pollute the workspace with files owned by root), we force
# running under current user's UID and GID. To be able to do that, we need to provide a home directory for the user
# otherwise the UID would be homeless under the docker container and pip install wouldn't work. For simplicity,
# we just map the user's home to a throwaway temporary directory
docker run $DOCKER_TTY_ARGS --rm --user "$(id -u):$(id -g)" -e "HOME=/home/fake-user" -v "$(mktemp -d):/home/fake-user" -v "$(pwd)":/work -w /work quay.io/pypa/manylinux_2_24_aarch64 kokoro/linux/aarch64/python_run_tests_with_qemu_aarch64.sh

View File

@ -0,0 +1,3 @@
FROM arm64v8/debian:buster
RUN apt-get update && apt-get install -y php7.3-cli php7.3-dev php7.3-bcmath composer phpunit curl git valgrind && apt-get clean

View File

@ -0,0 +1,47 @@
#!/bin/bash
#
# Build file to set up and run tests
set -ex
# Install Bazel 4.0.0.
use_bazel.sh 4.0.0
bazel version
# Print bazel testlogs to stdout when tests failed.
function print_test_logs {
# TODO(yannic): Only print logs of failing tests.
testlogs_dir=$(bazel info bazel-testlogs)
testlogs=$(find "${testlogs_dir}" -name "*.log")
for log in $testlogs; do
cat "${log}"
done
}
# Change to repo root
cd $(dirname $0)/../../..
git submodule update --init --recursive
# Disabled for now, re-enable if appropriate.
# //:build_files_updated_unittest \
trap print_test_logs EXIT
bazel test -k --copt=-Werror --host_copt=-Werror \
//java:tests \
//:protoc \
//:protobuf \
//:protobuf_python \
//:protobuf_test \
@com_google_protobuf//:cc_proto_blacklist_test
trap - EXIT
pushd examples
bazel build //...
popd
# Verify that we can build successfully from generated tar files.
./autogen.sh && ./configure && make -j$(nproc) dist
DIST=`ls *.tar.gz`
tar -xf $DIST
cd ${DIST//.tar.gz}
bazel build //:protobuf //:protobuf_java

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/bazel/build.sh"
timeout_mins: 15

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/bazel/build.sh"
timeout_mins: 15

View File

@ -0,0 +1,18 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERHUB_ORGANIZATION=protobuftesting
export DOCKERFILE_DIR=kokoro/linux/dockerfile/test/java_stretch
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="benchmark"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/benchmark/build.sh"
timeout_mins: 240
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,99 @@
#!/bin/bash
#
# Change to repo root
cd $(dirname $0)/../../..
set -ex
export OUTPUT_DIR=testoutput
repo_root="$(pwd)"
# TODO(jtattermusch): Add back support for benchmarking with tcmalloc for C++ and python.
# This feature was removed since it used to use tcmalloc from https://github.com/gperftools/gperftools.git
# which is very outdated. See https://github.com/protocolbuffers/protobuf/issues/8725.
# download datasets for benchmark
pushd benchmarks
datasets=$(for file in $(find . -type f -name "dataset.*.pb" -not -path "./tmp/*"); do echo "$(pwd)/$file"; done | xargs)
echo $datasets
popd
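# $datasets now holds a space-separated list of absolute paths to the
# dataset.*.pb files found above (the exact set depends on what is present
# under benchmarks/), and it is passed to each benchmark binary below.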
# build Python protobuf
./autogen.sh
./configure CXXFLAGS="-fPIC -O2"
make -j8
pushd python
python3 -m venv env
source env/bin/activate
python3 setup.py build --cpp_implementation
pip3 install --install-option="--cpp_implementation" .
popd
# build and run Python benchmark
# We do this before building protobuf C++ since the C++ build
# will rewrite some libraries used by protobuf python.
pushd benchmarks
make python-pure-python-benchmark
make python-cpp-reflection-benchmark
make -j8 python-cpp-generated-code-benchmark
echo "[" > tmp/python_result.json
echo "benchmarking pure python..."
./python-pure-python-benchmark --json --behavior_prefix="pure-python-benchmark" $datasets >> tmp/python_result.json
echo "," >> "tmp/python_result.json"
echo "benchmarking python cpp reflection..."
env LD_LIBRARY_PATH="${repo_root}/src/.libs" ./python-cpp-reflection-benchmark --json --behavior_prefix="cpp-reflection-benchmark" $datasets >> tmp/python_result.json
echo "," >> "tmp/python_result.json"
echo "benchmarking python cpp generated code..."
env LD_LIBRARY_PATH="${repo_root}/src/.libs" ./python-cpp-generated-code-benchmark --json --behavior_prefix="cpp-generated-code-benchmark" $datasets >> tmp/python_result.json
echo "]" >> "tmp/python_result.json"
popd
# build CPP protobuf
./configure
make clean && make -j8
pushd java
mvn package -B -Dmaven.test.skip=true
popd
pushd benchmarks
# build and run C++ benchmark
# "make clean" deletes the contents of the tmp/ directory, so we move it elsewhere and then restore it once build is done.
# TODO(jtattermusch): find a less clumsy way of protecting python_result.json contents
mv tmp/python_result.json . && make clean && make -j8 cpp-benchmark && mv python_result.json tmp
echo "benchmarking cpp..."
env ./cpp-benchmark --benchmark_min_time=5.0 --benchmark_out_format=json --benchmark_out="tmp/cpp_result.json" $datasets
# TODO(jtattermusch): add benchmarks for https://github.com/protocolbuffers/protobuf-go.
# The original benchmarks for https://github.com/golang/protobuf were removed
# because:
# * they were broken and haven't been producing results for a long time
# * the https://github.com/golang/protobuf implementation has been superseded by
# https://github.com/protocolbuffers/protobuf-go
# build and run java benchmark (java 11 is required)
make java-benchmark
echo "benchmarking java..."
./java-benchmark -Cresults.file.options.file="tmp/java_result.json" $datasets
# TODO(jtattermusch): re-enable JS benchmarks once https://github.com/protocolbuffers/protobuf/issues/8747 is fixed.
# build and run js benchmark
# make js-benchmark
# echo "benchmarking js..."
# ./js-benchmark $datasets --json_output=$(pwd)/tmp/node_result.json
# TODO(jtattermusch): add php-c-benchmark. Currently its build is broken.
# persist the raw results in the build job log (for better debuggability)
cat tmp/cpp_result.json
cat tmp/java_result.json
cat tmp/python_result.json
# print the postprocessed results to the build job log
# TODO(jtattermusch): re-enable uploading results to bigquery (it is currently broken)
make python_add_init
env LD_LIBRARY_PATH="${repo_root}/src/.libs" python3 -m util.result_parser \
-cpp="../tmp/cpp_result.json" -java="../tmp/java_result.json" -python="../tmp/python_result.json"
popd

View File

@ -0,0 +1,64 @@
#!/bin/bash
#
# Builds docker image and runs a command under it.
# This is a generic script that is configured with the following variables:
#
# DOCKERHUB_ORGANIZATION - The organization on docker hub storing the
# Dockerfile.
# DOCKERFILE_DIR - Directory in which Dockerfile file is located.
# DOCKER_RUN_SCRIPT - Script to run under docker (relative to protobuf repo root)
# OUTPUT_DIR - Directory that will be copied from inside docker after finishing.
# $@ - Extra args to pass to docker run
set -ex
cd $(dirname $0)/../..
git_root=$(pwd)
cd -
# Use image name based on Dockerfile sha1
if [ -z "$DOCKERHUB_ORGANIZATION" ]
then
DOCKERHUB_ORGANIZATION=grpctesting/protobuf
DOCKER_IMAGE_NAME=${DOCKERHUB_ORGANIZATION}_$(sha1sum $DOCKERFILE_DIR/Dockerfile | cut -f1 -d\ )
else
# TODO(teboring): Remove this when all tests have been migrated to separate
# docker images.
DOCKERFILE_PREFIX=$(basename $DOCKERFILE_DIR)
DOCKER_IMAGE_NAME=${DOCKERHUB_ORGANIZATION}/${DOCKERFILE_PREFIX}_$(sha1sum $DOCKERFILE_DIR/Dockerfile | cut -f1 -d\ )
fi
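# Illustrative example (hypothetical values): with DOCKERHUB_ORGANIZATION set to
# "protobuftesting" and DOCKERFILE_DIR set to "kokoro/linux/dockerfile/test/csharp",
# the image name resolves to "protobuftesting/csharp_<sha1 of that Dockerfile>".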
# Pull the docker image from Docker Hub. This sometimes fails intermittently, so we
# keep trying until we succeed.
until docker pull $DOCKER_IMAGE_NAME; do sleep 10; done
# Ensure existence of ccache directory
CCACHE_DIR=/tmp/protobuf-ccache
mkdir -p $CCACHE_DIR
# Choose random name for docker container
CONTAINER_NAME="build_and_run_docker_$(uuidgen)"
echo $git_root
# Run command inside docker
docker run \
"$@" \
-e CCACHE_DIR=$CCACHE_DIR \
-e KOKORO_BUILD_NUMBER=$KOKORO_BUILD_NUMBER \
-e KOKORO_BUILD_ID=$KOKORO_BUILD_ID \
-e EXTERNAL_GIT_ROOT="/var/local/kokoro/protobuf" \
-e TEST_SET="$TEST_SET" \
-v "$git_root:/var/local/kokoro/protobuf:ro" \
-v $CCACHE_DIR:$CCACHE_DIR \
-w /var/local/git/protobuf \
--name=$CONTAINER_NAME \
$DOCKER_IMAGE_NAME \
bash -l "/var/local/kokoro/protobuf/$DOCKER_RUN_SCRIPT" || FAILED="true"
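# The "|| FAILED=..." pattern prevents "set -ex" from aborting the script here,
# so the container cleanup below always runs; a recorded failure is then
# re-raised as a non-zero exit at the end.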
# remove the container, possibly killing it first
docker rm -f $CONTAINER_NAME || true
[ -z "$FAILED" ] || {
exit 1
}

View File

@ -0,0 +1,11 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "continuous" and "presubmit" jobs.
set -ex
# Change to repo root
cd $(dirname $0)/../../..
kokoro/linux/aarch64/test_cpp_aarch64.sh

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/cpp_aarch64/build.sh"
timeout_mins: 120

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/cpp_aarch64/build.sh"
timeout_mins: 120

View File

@ -0,0 +1,25 @@
#!/bin/bash
#
# Build file to set up and run tests
set -ex # exit immediately on error
# Change to repo root
cd $(dirname $0)/../../..
./tests.sh cpp_distcheck
# Run tests under release docker image.
DOCKER_IMAGE_NAME=protobuf/protoc_$(sha1sum protoc-artifacts/Dockerfile | cut -f1 -d " ")
until docker pull $DOCKER_IMAGE_NAME; do sleep 10; done
docker run -v $(pwd):/var/local/protobuf --rm $DOCKER_IMAGE_NAME \
bash -l /var/local/protobuf/tests.sh cpp || FAILED="true"
# This directory is owned by root. We need to delete it, because otherwise
# Kokoro will attempt to rsync it and fail with a permission error.
rm -rf src/core
if [ "$FAILED" = "true" ]; then
exit 1
fi

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/cpp_distcheck/build.sh"
timeout_mins: 1440

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/cpp_distcheck/build.sh"
timeout_mins: 1440

View File

@ -0,0 +1,13 @@
#!/bin/bash
#
# Build file to set up and run tests
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERHUB_ORGANIZATION=protobuftesting
export DOCKERFILE_DIR=kokoro/linux/dockerfile/test/cpp_tcmalloc
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="cpp_tcmalloc"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/cpp_tcmalloc/build.sh"
timeout_mins: 1440

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/cpp_tcmalloc/build.sh"
timeout_mins: 1440

View File

@ -0,0 +1,18 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERHUB_ORGANIZATION=protobuftesting
export DOCKERFILE_DIR=kokoro/linux/dockerfile/test/csharp
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="csharp"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/csharp/build.sh"
timeout_mins: 1440

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/csharp/build.sh"
timeout_mins: 1440

View File

@ -0,0 +1,16 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "continuous" and "presubmit" jobs.
set -ex
# Change to repo root
cd $(dirname $0)/../../..
# Initialize any submodules.
git submodule update --init --recursive
kokoro/linux/aarch64/qemu_helpers/prepare_qemu.sh
kokoro/linux/aarch64/test_csharp_aarch64.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/csharp_aarch64/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/csharp_aarch64/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,15 @@
#!/bin/bash
#
# Build file to set up and run tests
set -ex # exit immediately on error
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERHUB_ORGANIZATION=protobuftesting
export DOCKERFILE_DIR=kokoro/linux/dockerfile/test/java_stretch
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="dist_install"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/dist_install/build.sh"
timeout_mins: 1440

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/dist_install/build.sh"
timeout_mins: 1440

View File

@ -0,0 +1,29 @@
#!/bin/bash
set -ex
cd $(dirname $0)/../../..
git_root=$(pwd)
cd kokoro/linux/dockerfile
DOCKERHUB_ORGANIZATION=protobuftesting
for DOCKERFILE_DIR in test/*
do
# Generate the image name based on the Dockerfile checksum. That works well as long
# as we can count on dockerfiles being written in a way that changing the logical
# contents of the docker image always changes the SHA (e.g. using the "ADD file"
# cmd in the dockerfile is not ok, as the contents of the added file will not be
# reflected in the SHA).
DOCKER_IMAGE_NAME=$(basename $DOCKERFILE_DIR)_$(sha1sum $DOCKERFILE_DIR/Dockerfile | cut -f1 -d\ )
echo $DOCKER_IMAGE_NAME
# skip the image if it already exists in the repo
curl --silent -f -lSL https://registry.hub.docker.com/v2/repositories/${DOCKERHUB_ORGANIZATION}/${DOCKER_IMAGE_NAME}/tags/latest > /dev/null \
&& continue
docker build -t ${DOCKERHUB_ORGANIZATION}/${DOCKER_IMAGE_NAME} ${DOCKERFILE_DIR}
# "docker login" needs to be run in advance
docker push ${DOCKERHUB_ORGANIZATION}/${DOCKER_IMAGE_NAME}
done

View File

@ -0,0 +1,29 @@
FROM debian:jessie
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
time \
wget \
&& apt-get clean
# Install dependencies for tcmalloc
RUN apt-get install -y \
google-perftools \
libgoogle-perftools4 \
libgoogle-perftools-dev

View File

@ -0,0 +1,41 @@
FROM debian:buster
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
time \
wget \
&& apt-get clean
# Update ca-certificates to fix known buster + .NET 5 issue
# https://github.com/NuGet/Announcements/issues/49
RUN apt-get update && apt-get install -y ca-certificates && apt-get clean
# dotnet SDK prerequisites
RUN apt-get update && apt-get install -y libunwind8 libicu63 && apt-get clean
# Install dotnet SDK via install script
RUN wget -q https://dot.net/v1/dotnet-install.sh && \
chmod u+x dotnet-install.sh && \
./dotnet-install.sh --version 3.1.415 && \
./dotnet-install.sh --version 6.0.100 && \
ln -s /root/.dotnet/dotnet /usr/local/bin
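# Note: dotnet-install.sh installs SDK versions side by side under /root/.dotnet,
# so both the 3.1 and 6.0 SDKs stay available to whichever project is built
# (selected e.g. by the project's target framework or a global.json file).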
RUN wget -q www.nuget.org/NuGet.exe -O /usr/local/bin/nuget.exe
ENV DOTNET_SKIP_FIRST_TIME_EXPERIENCE true

View File

@ -0,0 +1,38 @@
# Despite the name of this image, we are no longer on stretch.
# We should consider renaming this image, and/or evaluating what
# software versions we actually need.
FROM debian:bullseye
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
cmake \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
pkg-config \
time \
wget \
# Java dependencies
maven \
openjdk-11-jdk \
# Required for the gtest build.
python2 \
# Python dependencies
python3-dev \
python3-setuptools \
python3-pip \
python3-venv \
&& apt-get clean

View File

@ -0,0 +1,37 @@
FROM ubuntu:latest
RUN apt-get update && apt-get install -y gnupg
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get clean && apt-get update && apt-get install -y --force-yes \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
time \
wget \
&& apt-get clean
##################
# Javascript dependencies.
# We need to set these environment variables so that the Docker build does not
# have to ask for this information while it is installing the tzdata package.
RUN DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles" \
apt-get install -y \
# -- For javascript and closure compiler -- \
npm \
default-jre \
python

View File

@ -0,0 +1,255 @@
FROM debian:jessie
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
time \
wget \
re2c \
sqlite3 \
libsqlite3-dev \
&& apt-get clean
# Install php dependencies
RUN apt-get clean && apt-get update && apt-get install -y --force-yes \
php5 \
libcurl4-openssl-dev \
libgmp-dev \
libgmp3-dev \
libssl-dev \
libxml2-dev \
unzip \
zlib1g-dev \
pkg-config \
&& apt-get clean
# Install other dependencies
RUN ln -sf /usr/include/x86_64-linux-gnu/gmp.h /usr/include/gmp.h
RUN wget http://ftp.gnu.org/gnu/bison/bison-2.6.4.tar.gz -O /var/local/bison-2.6.4.tar.gz
RUN cd /var/local \
&& tar -zxvf bison-2.6.4.tar.gz \
&& cd /var/local/bison-2.6.4 \
&& ./configure \
&& make \
&& make install
# Install composer
RUN curl -sS https://getcomposer.org/installer | php
RUN mv composer.phar /usr/local/bin/composer
# Download php source code
RUN git clone https://github.com/php/php-src
# php 5.6
RUN cd php-src \
&& git checkout PHP-5.6.39 \
&& ./buildconf --force
RUN cd php-src \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-5.6 \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-5.phar \
&& chmod +x phpunit \
&& mv phpunit /usr/local/php-5.6/bin
# php 7.0
RUN cd php-src \
&& git checkout PHP-7.0.33 \
&& ./buildconf --force
RUN cd php-src \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.0 \
&& make \
&& make install \
&& make clean
RUN cd php-src \
&& ./configure \
--enable-maintainer-zts \
--enable-mbstring \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.0-zts \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-6.phar \
&& chmod +x phpunit \
&& cp phpunit /usr/local/php-7.0/bin \
&& mv phpunit /usr/local/php-7.0-zts/bin
# php 7.1
RUN cd php-src \
&& git checkout PHP-7.1.25 \
&& ./buildconf --force
RUN cd php-src \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.1 \
&& make \
&& make install \
&& make clean
RUN cd php-src \
&& ./configure \
--enable-maintainer-zts \
--enable-mbstring \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.1-zts \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-7.5.0.phar \
&& chmod +x phpunit \
&& cp phpunit /usr/local/php-7.1/bin \
&& mv phpunit /usr/local/php-7.1-zts/bin
# php 7.2
RUN cd php-src \
&& git checkout PHP-7.2.13 \
&& ./buildconf --force
RUN cd php-src \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.2 \
&& make \
&& make install \
&& make clean
RUN cd php-src \
&& ./configure \
--enable-maintainer-zts \
--enable-mbstring \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.2-zts \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-7.5.0.phar \
&& chmod +x phpunit \
&& cp phpunit /usr/local/php-7.2/bin \
&& mv phpunit /usr/local/php-7.2-zts/bin
# php 7.3
RUN cd php-src \
&& git checkout PHP-7.3.0 \
&& ./buildconf --force
RUN cd php-src \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.3 \
&& make \
&& make install \
&& make clean
RUN cd php-src \
&& ./configure \
--enable-maintainer-zts \
--enable-mbstring \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.3-zts \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-7.5.0.phar \
&& chmod +x phpunit \
&& cp phpunit /usr/local/php-7.3/bin \
&& mv phpunit /usr/local/php-7.3-zts/bin
# php 7.4
RUN wget https://ftp.gnu.org/gnu/bison/bison-3.0.1.tar.gz -O /var/local/bison-3.0.1.tar.gz
RUN cd /var/local \
&& tar -zxvf bison-3.0.1.tar.gz \
&& cd /var/local/bison-3.0.1 \
&& ./configure \
&& make \
&& make install
RUN wget https://github.com/php/php-src/archive/php-7.4.0.tar.gz -O /var/local/php-7.4.0.tar.gz
RUN cd /var/local \
&& tar -zxvf php-7.4.0.tar.gz
RUN cd /var/local/php-src-php-7.4.0 \
&& ./buildconf --force \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--disable-mbregex \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.4 \
&& make \
&& make install \
&& make clean
RUN cd /var/local/php-src-php-7.4.0 \
&& ./buildconf --force \
&& ./configure \
--enable-maintainer-zts \
--enable-mbstring \
--disable-mbregex \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.4-zts \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-8.phar \
&& chmod +x phpunit \
&& cp phpunit /usr/local/php-7.4/bin \
&& mv phpunit /usr/local/php-7.4-zts/bin
# Install php dependencies
RUN apt-get clean && apt-get update && apt-get install -y --force-yes \
valgrind \
&& apt-get clean

View File

@ -0,0 +1,124 @@
FROM debian:stretch
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
time \
wget \
re2c \
sqlite3 \
vim \
libonig-dev \
libsqlite3-dev \
&& apt-get clean
# Install php dependencies
RUN apt-get clean && apt-get update && apt-get install -y --force-yes \
php \
libcurl4-openssl-dev \
libgmp-dev \
libgmp3-dev \
libssl-dev \
libxml2-dev \
unzip \
zlib1g-dev \
pkg-config \
&& apt-get clean
# Install other dependencies
RUN ln -sf /usr/include/x86_64-linux-gnu/gmp.h /usr/include/gmp.h
RUN wget https://ftp.gnu.org/gnu/bison/bison-3.0.1.tar.gz -O /var/local/bison-3.0.1.tar.gz
RUN cd /var/local \
&& tar -zxvf bison-3.0.1.tar.gz \
&& cd /var/local/bison-3.0.1 \
&& ./configure \
&& make \
&& make install
# Install composer
RUN curl -sS https://getcomposer.org/installer | php
RUN mv composer.phar /usr/local/bin/composer
# Download php source code
RUN git clone https://github.com/php/php-src
# php 8.0
RUN cd php-src \
&& git checkout php-8.0.0 \
&& ./buildconf --force
RUN cd php-src \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-8.0 \
&& make \
&& make install \
&& make clean
RUN cd php-src \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--enable-maintainer-zts \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-8.0-zts \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-9.phar \
&& chmod +x phpunit \
&& cp phpunit /usr/local/php-8.0/bin \
&& mv phpunit /usr/local/php-8.0-zts/bin
# php 8.1
RUN cd php-src \
&& git checkout php-8.1.2 \
&& ./buildconf --force
RUN cd php-src \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-8.1 \
&& make \
&& make install \
&& make clean
RUN cd php-src \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--enable-maintainer-zts \
--with-gmp \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-8.1-zts \
&& make \
&& make install \
&& make clean
# Install php dependencies
RUN apt-get clean && apt-get update && apt-get install -y --force-yes \
valgrind \
&& apt-get clean
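For every version this image carries both a plain build and one configured with --enable-maintainer-zts. Whether a given binary actually ended up thread-safe can be read off its phpinfo output; a quick check run inside the image (a sketch, not part of the Dockerfile):

# Reports "Thread Safety => enabled" or "=> disabled" for each interpreter.
/usr/local/php-8.0/bin/php -i | grep 'Thread Safety'
/usr/local/php-8.0-zts/bin/php -i | grep 'Thread Safety'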

View File

@ -0,0 +1,260 @@
FROM i386/debian:jessie
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
time \
wget \
re2c \
sqlite3 \
libsqlite3-dev \
&& apt-get clean
# Install php dependencies
RUN apt-get clean && apt-get update && apt-get install -y --force-yes \
bison \
php5 \
libcurl4-openssl-dev \
libssl-dev \
libxml2-dev \
unzip \
zlib1g-dev \
pkg-config \
&& apt-get clean
# Install other dependencies
RUN wget http://ftp.gnu.org/gnu/bison/bison-2.6.4.tar.gz -O /var/local/bison-2.6.4.tar.gz
RUN cd /var/local \
&& tar -zxvf bison-2.6.4.tar.gz \
&& cd /var/local/bison-2.6.4 \
&& ./configure \
&& make \
&& make install
# Install composer
RUN php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
RUN php composer-setup.php
RUN mv composer.phar /usr/bin/composer
RUN php -r "unlink('composer-setup.php');"
# Download php source code
RUN git clone https://github.com/php/php-src
# php 5.6
RUN cd php-src \
&& git checkout PHP-5.6.39 \
&& ./buildconf --force
RUN cd php-src \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-5.6 \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-5.phar \
&& chmod +x phpunit \
&& mv phpunit /usr/local/php-5.6/bin
# php 7.0
RUN wget https://github.com/php/php-src/archive/php-7.0.33.tar.gz -O /var/local/php-7.0.33.tar.gz
RUN cd /var/local \
&& tar -zxvf php-7.0.33.tar.gz
RUN cd /var/local/php-src-php-7.0.33 \
&& ./buildconf --force \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.0 \
&& make \
&& make install \
&& make clean
RUN cd /var/local/php-src-php-7.0.33 \
&& ./buildconf --force \
&& ./configure \
--enable-maintainer-zts \
--enable-mbstring \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.0-zts \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-6.phar \
&& chmod +x phpunit \
&& cp phpunit /usr/local/php-7.0/bin \
&& mv phpunit /usr/local/php-7.0-zts/bin
# php 7.1
RUN wget https://github.com/php/php-src/archive/php-7.1.25.tar.gz -O /var/local/php-7.1.25.tar.gz
RUN cd /var/local \
&& tar -zxvf php-7.1.25.tar.gz
RUN cd /var/local/php-src-php-7.1.25 \
&& ./buildconf --force \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.1 \
&& make \
&& make install \
&& make clean
RUN cd /var/local/php-src-php-7.1.25 \
&& ./buildconf --force \
&& ./configure \
--enable-maintainer-zts \
--enable-mbstring \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.1-zts \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-7.5.0.phar \
&& chmod +x phpunit \
&& cp phpunit /usr/local/php-7.1/bin \
&& mv phpunit /usr/local/php-7.1-zts/bin
# php 7.2
RUN wget https://github.com/php/php-src/archive/php-7.2.13.tar.gz -O /var/local/php-7.2.13.tar.gz
RUN cd /var/local \
&& tar -zxvf php-7.2.13.tar.gz
RUN cd /var/local/php-src-php-7.2.13 \
&& ./buildconf --force \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.2 \
&& make \
&& make install \
&& make clean
RUN cd /var/local/php-src-php-7.2.13 \
&& ./buildconf --force \
&& ./configure \
--enable-maintainer-zts \
--enable-mbstring \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.2-zts \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-7.5.0.phar \
&& chmod +x phpunit \
&& cp phpunit /usr/local/php-7.2/bin \
&& mv phpunit /usr/local/php-7.2-zts/bin
# php 7.3
RUN wget https://github.com/php/php-src/archive/php-7.3.0.tar.gz -O /var/local/php-7.3.0.tar.gz
RUN cd /var/local \
&& tar -zxvf php-7.3.0.tar.gz
RUN cd /var/local/php-src-php-7.3.0 \
&& ./buildconf --force \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.3 \
&& make \
&& make install \
&& make clean
RUN cd /var/local/php-src-php-7.3.0 \
&& ./buildconf --force \
&& ./configure \
--enable-maintainer-zts \
--enable-mbstring \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.3-zts \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-7.5.0.phar \
&& chmod +x phpunit \
&& cp phpunit /usr/local/php-7.3/bin \
&& mv phpunit /usr/local/php-7.3-zts/bin
# php 7.4
RUN wget https://ftp.gnu.org/gnu/bison/bison-3.0.1.tar.gz -O /var/local/bison-3.0.1.tar.gz
RUN cd /var/local \
&& tar -zxvf bison-3.0.1.tar.gz \
&& cd /var/local/bison-3.0.1 \
&& ./configure \
&& make \
&& make install
RUN wget https://github.com/php/php-src/archive/php-7.4.0.tar.gz -O /var/local/php-7.4.0.tar.gz
RUN cd /var/local \
&& tar -zxvf php-7.4.0.tar.gz
RUN cd /var/local/php-src-php-7.4.0 \
&& ./buildconf --force \
&& ./configure \
--enable-bcmath \
--enable-mbstring \
--disable-mbregex \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.4 \
&& make \
&& make install \
&& make clean
RUN cd /var/local/php-src-php-7.4.0 \
&& ./buildconf --force \
&& ./configure \
--enable-maintainer-zts \
--enable-mbstring \
--disable-mbregex \
--with-openssl \
--with-zlib \
--prefix=/usr/local/php-7.4-zts \
&& make \
&& make install \
&& make clean
RUN wget -O phpunit https://phar.phpunit.de/phpunit-8.phar \
&& chmod +x phpunit \
&& cp phpunit /usr/local/php-7.4/bin \
&& mv phpunit /usr/local/php-7.4-zts/bin
# Install php dependencies
RUN apt-get clean && apt-get update && apt-get install -y --force-yes \
valgrind \
&& apt-get clean

View File

@ -0,0 +1,31 @@
FROM python:3.10-buster
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
time \
wget \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Install Python libraries.
RUN python -m pip install --no-cache-dir --upgrade \
pip \
setuptools \
tox \
wheel
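This image only provisions the Python packaging toolchain (pip, setuptools, tox, wheel); the Kokoro job scripts drive the actual protobuf tests. A minimal smoke-test sketch (the image tag is arbitrary; run the build from the directory containing this Dockerfile):

# Sketch: build the image and confirm the toolchain is importable.
docker build -t protobuf-python310-test .
docker run --rm protobuf-python310-test python -c "import tox, wheel; print('toolchain ok')"

The 3.7, 3.8, and 3.9 variants below differ only in their base image.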

View File

@ -0,0 +1,31 @@
FROM python:3.7-buster
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
time \
wget \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Install Python libraries.
RUN python -m pip install --no-cache-dir --upgrade \
pip \
setuptools \
tox \
wheel

View File

@ -0,0 +1,31 @@
FROM python:3.8-buster
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
time \
wget \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Install Python libraries.
RUN python -m pip install --no-cache-dir --upgrade \
pip \
setuptools \
tox \
wheel

View File

@ -0,0 +1,31 @@
FROM python:3.9-buster
# Install dependencies. We start with the basic ones required to build protoc
# and the C++ build
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
git \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
parallel \
time \
wget \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Install Python libraries.
RUN python -m pip install --no-cache-dir --upgrade \
pip \
setuptools \
tox \
wheel

View File

@ -0,0 +1,17 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERFILE_DIR=kokoro/linux/64-bit
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="golang"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/golang/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/golang/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,16 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "continuous" and "presubmit" jobs.
set -ex
# Change to repo root
cd $(dirname $0)/../../..
# Initialize any submodules.
git submodule update --init --recursive
kokoro/linux/aarch64/qemu_helpers/prepare_qemu.sh
kokoro/linux/aarch64/test_java_aarch64.sh
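The two helper scripts referenced above are not reproduced here; the prepare step presumably registers user-mode qemu emulation so aarch64 containers can run on the x86 Kokoro workers. A hedged sketch of a typical way to do that (the real prepare_qemu.sh may differ):

# Register qemu-user-static binfmt handlers so foreign-arch containers can run.
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes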

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/java_aarch64/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/java_aarch64/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,17 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERFILE_DIR=kokoro/linux/64-bit
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="java_jdk7"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/java_jdk7/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/java_jdk7/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,19 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERHUB_ORGANIZATION=protobuftesting
# The prebuilt image matching this Dockerfile's sha1 is fetched from that organization
export DOCKERFILE_DIR=kokoro/linux/dockerfile/test/java_stretch
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="java_linkage_monitor"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/java_linkage_monitor/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,12 @@
# Config file for running Linkage Monitor in Kokoro
# https://github.com/GoogleCloudPlatform/cloud-opensource-java/tree/master/linkage-monitor
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/java_linkage_monitor/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,17 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERFILE_DIR=kokoro/linux/64-bit
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="java_oracle7"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/java_oracle7/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/java_oracle7/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,18 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERHUB_ORGANIZATION=protobuftesting
export DOCKERFILE_DIR=kokoro/linux/dockerfile/test/javascript
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="javascript"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/javascript/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/javascript/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,16 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "continuous" and "presubmit" jobs.
set -ex
# Change to repo root
cd $(dirname $0)/../../..
# Initialize any submodules.
git submodule update --init --recursive
kokoro/linux/aarch64/qemu_helpers/prepare_qemu.sh
kokoro/linux/aarch64/test_javascript_aarch64.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/javascript_aarch64/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/javascript_aarch64/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,94 @@
"""Gathers output from test runs and create an XML file in JUnit format.
The output files from the individual tests have been written in a directory
structure like:
$DIR/joblog (output from "parallel --joblog joblog")
$DIR/logs/1/cpp/stdout
$DIR/logs/1/cpp/stderr
$DIR/logs/1/csharp/stdout
$DIR/logs/1/csharp/stderr
$DIR/logs/1/java_jdk7/stdout
$DIR/logs/1/java_jdk7/stderr
etc.
This script bundles them into a single output XML file so Jenkins can show
detailed test results. It runs as the last step before the Jenkins build
finishes.
"""
import os
import sys
from yattag import Doc
from collections import defaultdict
def readtests(basedir):
  tests = defaultdict(dict)

  # Sample input (note: separators are tabs).
  #
  # Seq Host Starttime Runtime Send Receive Exitval Signal Command
  # 1 : 1456263838.313 0.005 0 0 0 0 echo A
  with open(basedir + "/joblog") as jobs:
    firstline = next(jobs)
    for line in jobs:
      values = line.split("\t")
      name = values[8].split()[-1]
      test = tests[name]
      test["name"] = name
      test["time"] = values[3]
      exitval = values[6]
      if int(exitval):
        # We don't have a more specific message. User should look at stderr.
        test["failure"] = "TEST FAILURE"
      else:
        test["failure"] = False

  for testname in os.listdir(basedir + "/logs/1"):
    test = tests[testname]
    with open(basedir + "/logs/1/" + testname + "/stdout") as f:
      test["stdout"] = f.read()
    with open(basedir + "/logs/1/" + testname + "/stderr") as f:
      test["stderr"] = f.read()

  # The cpp test is special since it doesn't run under parallel so doesn't show
  # up in the job log.
  tests["cpp"]["name"] = "cpp"
  with open(basedir + '/logs/1/cpp/build_time', 'r') as f:
    tests["cpp"]["time"] = f.read().strip()
  tests["cpp"]["failure"] = False

  ret = tests.values()
  ret.sort(key=lambda x: x["name"])
  return ret

def genxml(tests):
  doc, tag, text = Doc().tagtext()
  with tag("testsuites"):
    with tag("testsuite", name="Protobuf Tests"):
      for test in tests:
        with tag("testcase", name=test["name"], classname=test["name"],
                 time=test["time"]):
          with tag("system-out"):
            text(test["stdout"])
          with tag("system-err"):
            text(test["stderr"])
          if test["failure"]:
            with tag("failure"):
              text(test["failure"])
  return doc.getvalue()

sys.stderr.write("make_test_output.py: writing XML from directory: " +
                 sys.argv[1] + "\n")
print(genxml(readtests(sys.argv[1])))
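The script takes the output directory as its only argument, writes progress to stderr, and prints the JUnit XML to stdout, so a typical invocation redirects stdout into the artifact file the CI collects. A usage sketch (the output filename follows the sponge_log.xml pattern the Kokoro configs above collect; yattag must be installed):

# Sketch: bundle the per-test logs under $DIR into one JUnit XML file.
# Note: the script targets Python 2 (dict.values() is a list sorted in place).
python make_test_output.py "$DIR" > "$DIR/sponge_log.xml"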

View File

@ -0,0 +1,17 @@
#!/bin/bash
#
# This is the entry point for kicking off a Kokoro job. This path is referenced
# from the .cfg files in this directory.
set -ex
cd $(dirname $0)
# Most of our tests use a debug build of PHP, but we do one build against an opt
# php just in case that surfaces anything unexpected.
../test_php.sh gcr.io/protobuf-build/php/linux:8.0.5-14a06550010c0649bf69b6c9b803c1ca609bbb6d
../test_php.sh gcr.io/protobuf-build/php/linux:7.0.33-dbg-14a06550010c0649bf69b6c9b803c1ca609bbb6d
../test_php.sh gcr.io/protobuf-build/php/linux:7.3.28-dbg-14a06550010c0649bf69b6c9b803c1ca609bbb6d
../test_php.sh gcr.io/protobuf-build/php/linux:7.4.18-dbg-14a06550010c0649bf69b6c9b803c1ca609bbb6d
../test_php.sh gcr.io/protobuf-build/php/linux:8.0.5-dbg-14a06550010c0649bf69b6c9b803c1ca609bbb6d

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/php80/build.sh"
timeout_mins: 20

View File

@ -0,0 +1,5 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/php80/build.sh"
timeout_mins: 20

View File

@ -0,0 +1,16 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "continuous" and "presubmit" jobs.
set -ex
# Change to repo root
cd $(dirname $0)/../../..
# Initialize any submodules.
git submodule update --init --recursive
kokoro/linux/aarch64/qemu_helpers/prepare_qemu.sh
kokoro/linux/aarch64/test_php_aarch64.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/php_aarch64/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/php_aarch64/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,23 @@
#!/bin/bash
#
# This is the entry point for kicking off a Kokoro job. This path is referenced
# from the .cfg files in this directory.
set -ex
# Change to repo base.
cd $(dirname $0)/../../..
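# Note: $(test -t 0 && echo "-it") below expands to "-it" only when stdin is a
# terminal, so the same docker run commands work both interactively and in CI.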
docker run $(test -t 0 && echo "-it") -v$PWD:/workspace gcr.io/protobuf-build/php/linux:8.0.5-dbg-14a06550010c0649bf69b6c9b803c1ca609bbb6d "composer test_valgrind"
docker run $(test -t 0 && echo "-it") -v$PWD:/workspace gcr.io/protobuf-build/php/linux:7.0.33-dbg-14a06550010c0649bf69b6c9b803c1ca609bbb6d "composer test && composer test_c"
docker run $(test -t 0 && echo "-it") -v$PWD:/workspace gcr.io/protobuf-build/php/linux:7.3.28-dbg-14a06550010c0649bf69b6c9b803c1ca609bbb6d "composer test && composer test_c"
docker run $(test -t 0 && echo "-it") -v$PWD:/workspace gcr.io/protobuf-build/php/linux:7.4.18-dbg-14a06550010c0649bf69b6c9b803c1ca609bbb6d "composer test && composer test_c"
docker run $(test -t 0 && echo "-it") -v$PWD:/workspace gcr.io/protobuf-build/php/linux:8.0.5-dbg-14a06550010c0649bf69b6c9b803c1ca609bbb6d "composer test && composer test_c"
# Run specialized memory leak & multirequest tests.
docker run $(test -t 0 && echo "-it") -v$PWD:/workspace gcr.io/protobuf-build/php/linux:8.0.5-dbg-14a06550010c0649bf69b6c9b803c1ca609bbb6d "composer test_c && tests/multirequest.sh && tests/memory_leak_test.sh"
# Most of our tests use a debug build of PHP, but we do one build against an opt
# php just in case that surfaces anything unexpected.
docker run $(test -t 0 && echo "-it") -v$PWD:/workspace gcr.io/protobuf-build/php/linux:8.0.5-14a06550010c0649bf69b6c9b803c1ca609bbb6d "composer test && composer test_c"

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/php_all/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/php_all/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,25 @@
#!/bin/bash
#
# This is the script that runs inside Docker, once the image has been built,
# to execute all tests for the "pull request" project.
WORKSPACE_BASE=`pwd`
MY_DIR="$(dirname "$0")"
TEST_SCRIPT=./tests.sh
BUILD_DIR=/tmp/protobuf
set -e # exit immediately on error
set -x # display all commands
# The protobuf repository is mounted into our Docker image, but read-only.
# We clone into a directory inside Docker (this is faster than cp).
rm -rf $BUILD_DIR
mkdir -p $BUILD_DIR
cd $BUILD_DIR
git clone /var/local/kokoro/protobuf
cd protobuf
# Initialize any submodules:
git submodule update --init --recursive
$TEST_SCRIPT $TEST_SET
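This is where the TEST_SET exported by the per-language wrappers above finally lands: after cloning, the run boils down to ./tests.sh <test set>. A condensed sketch of the chain as inferred from those wrappers (build_and_run_docker.sh and tests.sh themselves are not spelled out here):

# Sketch: what a per-language wrapper effectively sets in motion.
export DOCKERFILE_DIR=kokoro/linux/64-bit                        # image to build
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh  # script run inside that image
export OUTPUT_DIR=testoutput
export TEST_SET="golang"                                         # suite passed through to ./tests.sh
./kokoro/linux/build_and_run_docker.sh  # assumed to build the image and run $DOCKER_RUN_SCRIPT in it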

View File

@ -0,0 +1,18 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERHUB_ORGANIZATION=protobuftesting
export DOCKERFILE_DIR=kokoro/linux/dockerfile/test/python310
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="python310"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/python310/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/python310/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,18 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERHUB_ORGANIZATION=protobuftesting
export DOCKERFILE_DIR=kokoro/linux/dockerfile/test/python310
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="python310_cpp"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/python310_cpp/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/python310_cpp/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,18 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERHUB_ORGANIZATION=protobuftesting
export DOCKERFILE_DIR=kokoro/linux/dockerfile/test/python37
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="python37"
./kokoro/linux/build_and_run_docker.sh

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/python37/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,11 @@
# Config file for running tests in Kokoro
# Location of the build script in repository
build_file: "protobuf/kokoro/linux/python37/build.sh"
timeout_mins: 120
action {
define_artifacts {
regex: "**/sponge_log.xml"
}
}

View File

@ -0,0 +1,18 @@
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERHUB_ORGANIZATION=protobuftesting
export DOCKERFILE_DIR=kokoro/linux/dockerfile/test/python37
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="python37_cpp"
./kokoro/linux/build_and_run_docker.sh

Some files were not shown because too many files have changed in this diff.