
[meta-security][PATCH 4/4] openssl-tpm-engine: add package

Armin Kuster
 

Signed-off-by: Armin Kuster <akuster808@gmail.com>
---
...ate-tpm-key-support-well-known-key-option.patch | 99 ++++++++
.../files/0002-libtpm-support-env-TPM_SRK_PW.patch | 80 +++++++
.../files/0003-Fix-not-building-libtpm.la.patch | 25 ++
...-tpm-engine-parse-an-encrypted-tpm-SRK-pa.patch | 254 +++++++++++++++++++++
...-tpm-engine-change-variable-c-type-from-c.patch | 34 +++
.../openssl-tpm-engine/openssl-tpm-engine_0.4.2.bb | 78 +++++++
6 files changed, 570 insertions(+)
create mode 100644 meta-tpm/recipes-tpm/openssl-tpm-engine/files/0001-create-tpm-key-support-well-known-key-option.patch
create mode 100644 meta-tpm/recipes-tpm/openssl-tpm-engine/files/0002-libtpm-support-env-TPM_SRK_PW.patch
create mode 100644 meta-tpm/recipes-tpm/openssl-tpm-engine/files/0003-Fix-not-building-libtpm.la.patch
create mode 100644 meta-tpm/recipes-tpm/openssl-tpm-engine/files/0003-tpm-openssl-tpm-engine-parse-an-encrypted-tpm-SRK-pa.patch
create mode 100644 meta-tpm/recipes-tpm/openssl-tpm-engine/files/0004-tpm-openssl-tpm-engine-change-variable-c-type-from-c.patch
create mode 100644 meta-tpm/recipes-tpm/openssl-tpm-engine/openssl-tpm-engine_0.4.2.bb

diff --git a/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0001-create-tpm-key-support-well-known-key-option.patch b/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0001-create-tpm-key-support-well-known-key-option.patch
new file mode 100644
index 0000000..67071b6
--- /dev/null
+++ b/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0001-create-tpm-key-support-well-known-key-option.patch
@@ -0,0 +1,99 @@
+commit 16dac0cb7b73b8a7088300e45b98ac20819b03ed
+Author: Junxian.Xiao <Junxian.Xiao@windriver.com>
+Date: Wed Jun 19 18:57:13 2013 +0800
+
+support well-known password in openssl-tpm-engine.
+
+Add "-z" option to select well known password in create_tpm_key tool.
+
+Signed-off-by: Junxian.Xiao <Junxian.Xiao@windriver.com>
+
+diff --git a/create_tpm_key.c b/create_tpm_key.c
+index fee917f..7b94d62 100644
+--- a/create_tpm_key.c
++++ b/create_tpm_key.c
+@@ -46,6 +46,8 @@
+ #include <trousers/tss.h>
+ #include <trousers/trousers.h>
+
++#define TPM_WELL_KNOWN_KEY_LEN 20 /*well know key length is 20 bytes zero*/
++
+ #define print_error(a,b) \
+ fprintf(stderr, "%s:%d %s result: 0x%x (%s)\n", __FILE__, __LINE__, \
+ a, b, Trspi_Error_String(b))
+@@ -70,6 +72,7 @@ usage(char *argv0)
+ "\t\t-e|--enc-scheme encryption scheme to use [PKCSV15] or OAEP\n"
+ "\t\t-q|--sig-scheme signature scheme to use [DER] or SHA1\n"
+ "\t\t-s|--key-size key size in bits [2048]\n"
++ "\t\t-z|--zerokey use well known 20 bytes zero as SRK password.\n"
+ "\t\t-a|--auth require a password for the key [NO]\n"
+ "\t\t-p|--popup use TSS GUI popup dialogs to get the password "
+ "for the\n\t\t\t\t key [NO] (implies --auth)\n"
+@@ -147,6 +150,7 @@ int main(int argc, char **argv)
+ int asn1_len;
+ char *filename, c, *openssl_key = NULL;
+ int option_index, auth = 0, popup = 0, wrap = 0;
++ int wellknownkey = 0;
+ UINT32 enc_scheme = TSS_ES_RSAESPKCSV15;
+ UINT32 sig_scheme = TSS_SS_RSASSAPKCS1V15_DER;
+ UINT32 key_size = 2048;
+@@ -154,12 +158,15 @@ int main(int argc, char **argv)
+
+ while (1) {
+ option_index = 0;
+- c = getopt_long(argc, argv, "pe:q:s:ahw:",
++ c = getopt_long(argc, argv, "pe:q:s:zahw:",
+ long_options, &option_index);
+ if (c == -1)
+ break;
+
+ switch (c) {
++ case 'z':
++ wellknownkey = 1;
++ break;
+ case 'a':
+ initFlags |= TSS_KEY_AUTHORIZATION;
+ auth = 1;
+@@ -293,6 +300,8 @@ int main(int argc, char **argv)
+
+ if (srk_authusage) {
+ char *authdata = calloc(1, 128);
++ TSS_FLAG secretMode = TSS_SECRET_MODE_PLAIN;
++ int authlen = 0;
+
+ if (!authdata) {
+ fprintf(stderr, "malloc failed.\n");
+@@ -309,17 +318,26 @@ int main(int argc, char **argv)
+ exit(result);
+ }
+
+- if (EVP_read_pw_string(authdata, 128, "SRK Password: ", 0)) {
+- Tspi_Context_CloseObject(hContext, hKey);
+- Tspi_Context_Close(hContext);
+- free(authdata);
+- exit(result);
++ if (wellknownkey) {
++ memset(authdata, 0, TPM_WELL_KNOWN_KEY_LEN);
++ secretMode = TSS_SECRET_MODE_SHA1;
++ authlen = TPM_WELL_KNOWN_KEY_LEN;
++ }
++ else {
++ if (EVP_read_pw_string(authdata, 128, "SRK Password: ", 0)) {
++ Tspi_Context_CloseObject(hContext, hKey);
++ Tspi_Context_Close(hContext);
++ free(authdata);
++ exit(result);
++ }
++ secretMode = TSS_SECRET_MODE_PLAIN;
++ authlen = strlen(authdata);
+ }
+
+ //Set Secret
+ if ((result = Tspi_Policy_SetSecret(srkUsagePolicy,
+- TSS_SECRET_MODE_PLAIN,
+- strlen(authdata),
++ secretMode,
++ authlen,
+ (BYTE *)authdata))) {
+ print_error("Tspi_Policy_SetSecret", result);
+ free(authdata);
diff --git a/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0002-libtpm-support-env-TPM_SRK_PW.patch b/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0002-libtpm-support-env-TPM_SRK_PW.patch
new file mode 100644
index 0000000..f718f2e
--- /dev/null
+++ b/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0002-libtpm-support-env-TPM_SRK_PW.patch
@@ -0,0 +1,80 @@
+commit 16dac0cb7b73b8a7088300e45b98ac20819b03ed
+Author: Junxian.Xiao <Junxian.Xiao@windriver.com>
+Date: Wed Jun 19 18:57:13 2013 +0800
+
+support reading SRK password from env TPM_SRK_PW
+
+Add "env TPM_SRK_PW=xxxx" to set password for libtpm.so. Specially,
+use "env TPM_SRK_PW=#WELLKNOWN#" to set well known password.
+
+Signed-off-by: Junxian.Xiao <Junxian.Xiao@windriver.com>
+
+diff --git a/e_tpm.c b/e_tpm.c
+index f3e8bcf..7dcb75a 100644
+--- a/e_tpm.c
++++ b/e_tpm.c
+@@ -38,6 +38,8 @@
+
+ #include "e_tpm.h"
+
++#define TPM_WELL_KNOWN_KEY_LEN 20 /*well know key length is 20 bytes zero*/
++
+ //#define DLOPEN_TSPI
+
+ #ifndef OPENSSL_NO_HW
+@@ -248,6 +250,10 @@ int tpm_load_srk(UI_METHOD *ui, void *cb_data)
+ TSS_RESULT result;
+ UINT32 authusage;
+ BYTE *auth;
++ char *srkPasswd = NULL;
++ TSS_FLAG secretMode = secret_mode;
++ int authlen = 0;
++
+
+ if (hSRK != NULL_HKEY) {
+ DBGFN("SRK is already loaded.");
+@@ -299,18 +305,36 @@ int tpm_load_srk(UI_METHOD *ui, void *cb_data)
+ return 0;
+ }
+
+- if (!tpm_engine_get_auth(ui, (char *)auth, 128, "SRK authorization: ",
+- cb_data)) {
+- Tspi_Context_CloseObject(hContext, hSRK);
+- free(auth);
+- TSSerr(TPM_F_TPM_LOAD_SRK, TPM_R_REQUEST_FAILED);
+- return 0;
++ srkPasswd = getenv("TPM_SRK_PW");
++ if (NULL != srkPasswd) {
++ if (0 == strcmp(srkPasswd, "#WELLKNOWN#")) {
++ memset(auth, 0, TPM_WELL_KNOWN_KEY_LEN);
++ secretMode = TSS_SECRET_MODE_SHA1;
++ authlen = TPM_WELL_KNOWN_KEY_LEN;
++ } else {
++ int authbuflen = 128;
++ memset(auth, 0, authbuflen);
++ strncpy(auth, srkPasswd, authbuflen-1);
++ secretMode = TSS_SECRET_MODE_PLAIN;
++ authlen = strlen(auth);
++ }
++ }
++ else {
++ if (!tpm_engine_get_auth(ui, (char *)auth, 128,
++ "SRK authorization: ", cb_data)) {
++ Tspi_Context_CloseObject(hContext, hSRK);
++ free(auth);
++ TSSerr(TPM_F_TPM_LOAD_SRK, TPM_R_REQUEST_FAILED);
++ return 0;
++ }
++ secretMode = secret_mode;
++ authlen = strlen(auth);
+ }
+
+ /* secret_mode is a global that may be set by engine ctrl
+ * commands. By default, its set to TSS_SECRET_MODE_PLAIN */
+- if ((result = Tspi_Policy_SetSecret(hSRKPolicy, secret_mode,
+- strlen((char *)auth), auth))) {
++ if ((result = Tspi_Policy_SetSecret(hSRKPolicy, secretMode,
++ authlen, auth))) {
+ Tspi_Context_CloseObject(hContext, hSRK);
+ free(auth);
+ TSSerr(TPM_F_TPM_LOAD_SRK, TPM_R_REQUEST_FAILED);
diff --git a/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0003-Fix-not-building-libtpm.la.patch b/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0003-Fix-not-building-libtpm.la.patch
new file mode 100644
index 0000000..d24a150
--- /dev/null
+++ b/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0003-Fix-not-building-libtpm.la.patch
@@ -0,0 +1,25 @@
+From 7848445a1f4c750ef73bf96f5e89d402f87a1756 Mon Sep 17 00:00:00 2001
+From: Lans Zhang <jia.zhang@windriver.com>
+Date: Mon, 19 Jun 2017 14:54:28 +0800
+Subject: [PATCH] Fix not building libtpm.la
+
+Signed-off-by: Lans Zhang <jia.zhang@windriver.com>
+---
+ Makefile.am | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 6695656..634a7e6 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -10,4 +10,6 @@ libtpm_la_LIBADD=-lcrypto -lc -ltspi
+ libtpm_la_SOURCES=e_tpm.c e_tpm.h e_tpm_err.c
+
+ create_tpm_key_SOURCES=create_tpm_key.c
+-create_tpm_key_LDADD=-ltspi
++create_tpm_key_LDFLAGS=-ltspi
++
++LDADD=libtpm.la
+--
+2.7.5
+
diff --git a/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0003-tpm-openssl-tpm-engine-parse-an-encrypted-tpm-SRK-pa.patch b/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0003-tpm-openssl-tpm-engine-parse-an-encrypted-tpm-SRK-pa.patch
new file mode 100644
index 0000000..a88148f
--- /dev/null
+++ b/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0003-tpm-openssl-tpm-engine-parse-an-encrypted-tpm-SRK-pa.patch
@@ -0,0 +1,254 @@
+From eb28ad92a2722fd30f8114840cf2b1ade26b80ee Mon Sep 17 00:00:00 2001
+From: Limeng <Meng.Li@windriver.com>
+Date: Fri, 23 Jun 2017 11:39:04 +0800
+Subject: [PATCH] tpm:openssl-tpm-engine:parse an encrypted tpm SRK password
+ from env
+
+Before, we support reading SRK password from env TPM_SRK_PW,
+but it is a plain password and not secure.
+So, we improve it and support to get an encrypted (AES algorithm)
+SRK password from env, and then parse it. The default decrypting
+AES password and salt is set in bb file.
+When we initialize TPM, and set a SRK pw, and then we need to
+encrypt it with the same AES password and salt by AES algorithm.
+At last, we set a env as below:
+export TPM_SRK_ENC_PW=xxxxxxxx
+"xxxxxxxx" is the encrypted SRK password for libtpm.so.
+
+Signed-off-by: Meng Li <Meng.Li@windriver.com>
+---
+ e_tpm.c | 157 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
+ e_tpm.h | 4 ++
+ e_tpm_err.c | 4 ++
+ 3 files changed, 164 insertions(+), 1 deletion(-)
+
+diff --git a/e_tpm.c b/e_tpm.c
+index 7dcb75a..11bf74b 100644
+--- a/e_tpm.c
++++ b/e_tpm.c
+@@ -245,6 +245,118 @@ void ENGINE_load_tpm(void)
+ ERR_clear_error();
+ }
+
++static int tpm_decode_base64(unsigned char *indata,
++ int in_len,
++ unsigned char *outdata,
++ int *out_len)
++{
++ int total_len, len, ret;
++ EVP_ENCODE_CTX dctx;
++
++ EVP_DecodeInit(&dctx);
++
++ total_len = 0;
++ ret = EVP_DecodeUpdate(&dctx, outdata, &len, indata, in_len);
++ if (ret < 0) {
++ TSSerr(TPM_F_TPM_DECODE_BASE64, TPM_R_DECODE_BASE64_FAILED);
++ return 1;
++ }
++
++ total_len += len;
++ ret = EVP_DecodeFinal(&dctx, outdata, &len);
++ if (ret < 0) {
++ TSSerr(TPM_F_TPM_DECODE_BASE64, TPM_R_DECODE_BASE64_FAILED);
++ return 1;
++ }
++ total_len += len;
++
++ *out_len = total_len;
++
++ return 0;
++}
++
++static int tpm_decrypt_srk_pw(unsigned char *indata, int in_len,
++ unsigned char *outdata,
++ int *out_len)
++{
++ int dec_data_len, dec_data_lenfinal;
++ unsigned char dec_data[256];
++ unsigned char *aes_pw;
++ unsigned char aes_salt[PKCS5_SALT_LEN];
++ unsigned char key[EVP_MAX_KEY_LENGTH], iv[EVP_MAX_IV_LENGTH];
++ const EVP_CIPHER *cipher = NULL;
++ const EVP_MD *dgst = NULL;
++ EVP_CIPHER_CTX *ctx = NULL;
++
++ if (sizeof(SRK_DEC_SALT) - 1 > PKCS5_SALT_LEN) {
++ TSSerr(TPM_F_TPM_DECRYPT_SRK_PW, TPM_R_DECRYPT_SRK_PW_FAILED);
++ return 1;
++ }
++
++ aes_pw = malloc(sizeof(SRK_DEC_PW) - 1);
++ if (aes_pw == NULL) {
++ TSSerr(TPM_F_TPM_DECRYPT_SRK_PW, TPM_R_DECRYPT_SRK_PW_FAILED);
++ return 1;
++ }
++
++ memset(aes_salt, 0x00, sizeof(aes_salt));
++ memcpy(aes_pw, SRK_DEC_PW, sizeof(SRK_DEC_PW) - 1);
++ memcpy(aes_salt, SRK_DEC_SALT, sizeof(SRK_DEC_SALT) - 1);
++
++ cipher = EVP_get_cipherbyname("aes-128-cbc");
++ if (cipher == NULL) {
++ TSSerr(TPM_F_TPM_DECRYPT_SRK_PW, TPM_R_DECRYPT_SRK_PW_FAILED);
++ free(aes_pw);
++ return 1;
++ }
++ dgst = EVP_sha256();
++
++ EVP_BytesToKey(cipher, dgst, aes_salt, (unsigned char *)aes_pw, sizeof(SRK_DEC_PW) - 1, 1, key, iv);
++
++ ctx = EVP_CIPHER_CTX_new();
++ /* Don't set key or IV right away; we want to check lengths */
++ if (!EVP_CipherInit_ex(ctx, cipher, NULL, NULL, NULL, 0)) {
++ TSSerr(TPM_F_TPM_DECRYPT_SRK_PW, TPM_R_DECRYPT_SRK_PW_FAILED);
++ free(aes_pw);
++ return 1;
++ }
++
++ OPENSSL_assert(EVP_CIPHER_CTX_key_length(ctx) == 16);
++ OPENSSL_assert(EVP_CIPHER_CTX_iv_length(ctx) == 16);
++
++ if (!EVP_CipherInit_ex(ctx, NULL, NULL, key, iv, 0)) {
++ TSSerr(TPM_F_TPM_DECRYPT_SRK_PW, TPM_R_DECRYPT_SRK_PW_FAILED);
++ free(aes_pw);
++ return 1;
++ }
++
++ if (!EVP_CipherUpdate(ctx, dec_data, &dec_data_len, indata, in_len)) {
++ /* Error */
++ TSSerr(TPM_F_TPM_DECRYPT_SRK_PW, TPM_R_DECRYPT_SRK_PW_FAILED);
++ free(aes_pw);
++ EVP_CIPHER_CTX_free(ctx);
++ return 1;
++ }
++
++ if (!EVP_CipherFinal_ex(ctx, dec_data + dec_data_len, &dec_data_lenfinal)) {
++ /* Error */
++ TSSerr(TPM_F_TPM_DECRYPT_SRK_PW, TPM_R_DECRYPT_SRK_PW_FAILED);
++ free(aes_pw);
++ EVP_CIPHER_CTX_free(ctx);
++ return 1;
++ }
++
++ dec_data_len = dec_data_len + dec_data_lenfinal;
++
++ memcpy(outdata, dec_data, dec_data_len);
++ *out_len = dec_data_len;
++
++ free(aes_pw);
++ EVP_CIPHER_CTX_free(ctx);
++
++ return 0;
++}
++
+ int tpm_load_srk(UI_METHOD *ui, void *cb_data)
+ {
+ TSS_RESULT result;
+@@ -305,8 +417,50 @@ int tpm_load_srk(UI_METHOD *ui, void *cb_data)
+ return 0;
+ }
+
+- srkPasswd = getenv("TPM_SRK_PW");
++ srkPasswd = getenv("TPM_SRK_ENC_PW");
+ if (NULL != srkPasswd) {
++ int in_len = strlen(srkPasswd);
++ int out_len;
++ unsigned char *out_buf;
++
++ if (!in_len || in_len % 4) {
++ Tspi_Context_CloseObject(hContext, hSRK);
++ free(auth);
++ TSSerr(TPM_F_TPM_LOAD_SRK, TPM_R_REQUEST_FAILED);
++ return 0;
++ }
++
++ out_len = in_len * 3 / 4;
++ out_buf = malloc(out_len);
++ if (NULL == out_buf) {
++ Tspi_Context_CloseObject(hContext, hSRK);
++ free(auth);
++ TSSerr(TPM_F_TPM_LOAD_SRK, TPM_R_REQUEST_FAILED);
++ return 0;
++ }
++
++ if (tpm_decode_base64(srkPasswd, strlen(srkPasswd),
++ out_buf, &out_len)) {
++ Tspi_Context_CloseObject(hContext, hSRK);
++ free(auth);
++ free(out_buf);
++ TSSerr(TPM_F_TPM_LOAD_SRK, TPM_R_REQUEST_FAILED);
++ return 0;
++ }
++
++ if (tpm_decrypt_srk_pw(out_buf, out_len,
++ auth, &authlen)) {
++ Tspi_Context_CloseObject(hContext, hSRK);
++ free(auth);
++ free(out_buf);
++ TSSerr(TPM_F_TPM_LOAD_SRK, TPM_R_REQUEST_FAILED);
++ return 0;
++ }
++ secretMode = TSS_SECRET_MODE_PLAIN;
++ free(out_buf);
++ }
++#ifdef TPM_SRK_PLAIN_PW
++else if (NULL != (srkPasswd = getenv("TPM_SRK_PW"))) {
+ if (0 == strcmp(srkPasswd, "#WELLKNOWN#")) {
+ memset(auth, 0, TPM_WELL_KNOWN_KEY_LEN);
+ secretMode = TSS_SECRET_MODE_SHA1;
+@@ -319,6 +473,7 @@ int tpm_load_srk(UI_METHOD *ui, void *cb_data)
+ authlen = strlen(auth);
+ }
+ }
++#endif
+ else {
+ if (!tpm_engine_get_auth(ui, (char *)auth, 128,
+ "SRK authorization: ", cb_data)) {
+diff --git a/e_tpm.h b/e_tpm.h
+index 6316e0b..56ff202 100644
+--- a/e_tpm.h
++++ b/e_tpm.h
+@@ -66,6 +66,8 @@ void ERR_TSS_error(int function, int reason, char *file, int line);
+ #define TPM_F_TPM_FILL_RSA_OBJECT 116
+ #define TPM_F_TPM_ENGINE_GET_AUTH 117
+ #define TPM_F_TPM_CREATE_SRK_POLICY 118
++#define TPM_F_TPM_DECODE_BASE64 119
++#define TPM_F_TPM_DECRYPT_SRK_PW 120
+
+ /* Reason codes. */
+ #define TPM_R_ALREADY_LOADED 100
+@@ -96,6 +98,8 @@ void ERR_TSS_error(int function, int reason, char *file, int line);
+ #define TPM_R_ID_INVALID 125
+ #define TPM_R_UI_METHOD_FAILED 126
+ #define TPM_R_UNKNOWN_SECRET_MODE 127
++#define TPM_R_DECODE_BASE64_FAILED 128
++#define TPM_R_DECRYPT_SRK_PW_FAILED 129
+
+ /* structure pointed to by the RSA object's app_data pointer */
+ struct rsa_app_data
+diff --git a/e_tpm_err.c b/e_tpm_err.c
+index 25a5d0f..439e267 100644
+--- a/e_tpm_err.c
++++ b/e_tpm_err.c
+@@ -235,6 +235,8 @@ static ERR_STRING_DATA TPM_str_functs[] = {
+ {ERR_PACK(0, TPM_F_TPM_BIND_FN, 0), "TPM_BIND_FN"},
+ {ERR_PACK(0, TPM_F_TPM_FILL_RSA_OBJECT, 0), "TPM_FILL_RSA_OBJECT"},
+ {ERR_PACK(0, TPM_F_TPM_ENGINE_GET_AUTH, 0), "TPM_ENGINE_GET_AUTH"},
++ {ERR_PACK(0, TPM_F_TPM_DECODE_BASE64, 0), "TPM_DECODE_BASE64"},
++ {ERR_PACK(0, TPM_F_TPM_DECRYPT_SRK_PW, 0), "TPM_DECRYPT_SRK_PW"},
+ {0, NULL}
+ };
+
+@@ -265,6 +267,8 @@ static ERR_STRING_DATA TPM_str_reasons[] = {
+ {TPM_R_FILE_READ_FAILED, "failed reading the key file"},
+ {TPM_R_ID_INVALID, "engine id doesn't match"},
+ {TPM_R_UI_METHOD_FAILED, "ui function failed"},
++ {TPM_R_DECODE_BASE64_FAILED, "decode base64 failed"},
++ {TPM_R_DECRYPT_SRK_PW_FAILED, "decrypt srk password failed"},
+ {0, NULL}
+ };
+
+--
+2.9.3
+
diff --git a/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0004-tpm-openssl-tpm-engine-change-variable-c-type-from-c.patch b/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0004-tpm-openssl-tpm-engine-change-variable-c-type-from-c.patch
new file mode 100644
index 0000000..076704d
--- /dev/null
+++ b/meta-tpm/recipes-tpm/openssl-tpm-engine/files/0004-tpm-openssl-tpm-engine-change-variable-c-type-from-c.patch
@@ -0,0 +1,34 @@
+From fb44e2814fd819c086f9a4c925427f89c0e8cec6 Mon Sep 17 00:00:00 2001
+From: Limeng <Meng.Li@windriver.com>
+Date: Fri, 21 Jul 2017 16:32:02 +0800
+Subject: [PATCH] tpm:openssl-tpm-engine: change variable c type from char
+ into int
+
+refer to getopt_long() function definition, its return value type is
+int. So, change variable c type from char into int.
+On arm platform, when getopt_long() calling fails, if we define c as
+char type, its value will be 255, not -1. This will cause code enter
+wrong case.
+
+Signed-off-by: Meng Li <Meng.Li@windriver.com>
+---
+ create_tpm_key.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/create_tpm_key.c b/create_tpm_key.c
+index 7b94d62..f30af90 100644
+--- a/create_tpm_key.c
++++ b/create_tpm_key.c
+@@ -148,7 +148,8 @@ int main(int argc, char **argv)
+ ASN1_OCTET_STRING *blob_str;
+ unsigned char *blob_asn1 = NULL;
+ int asn1_len;
+- char *filename, c, *openssl_key = NULL;
++ char *filename, *openssl_key = NULL;
++ int c;
+ int option_index, auth = 0, popup = 0, wrap = 0;
+ int wellknownkey = 0;
+ UINT32 enc_scheme = TSS_ES_RSAESPKCSV15;
+--
+1.7.9.5
+
diff --git a/meta-tpm/recipes-tpm/openssl-tpm-engine/openssl-tpm-engine_0.4.2.bb b/meta-tpm/recipes-tpm/openssl-tpm-engine/openssl-tpm-engine_0.4.2.bb
new file mode 100644
index 0000000..4854f70
--- /dev/null
+++ b/meta-tpm/recipes-tpm/openssl-tpm-engine/openssl-tpm-engine_0.4.2.bb
@@ -0,0 +1,78 @@
+DESCRIPTION = "OpenSSL secure engine based on TPM hardware"
+HOMEPAGE = "https://sourceforge.net/projects/trousers/"
+SECTION = "security/tpm"
+
+LICENSE = "openssl"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=11f0ee3af475c85b907426e285c9bb52"
+
+DEPENDS += "openssl trousers"
+
+SRC_URI = "\
+ git://git.code.sf.net/p/trousers/openssl_tpm_engine \
+ file://0001-create-tpm-key-support-well-known-key-option.patch \
+ file://0002-libtpm-support-env-TPM_SRK_PW.patch \
+ file://0003-Fix-not-building-libtpm.la.patch \
+ file://0003-tpm-openssl-tpm-engine-parse-an-encrypted-tpm-SRK-pa.patch \
+ file://0004-tpm-openssl-tpm-engine-change-variable-c-type-from-c.patch \
+"
+SRCREV = "bbc2b1af809f20686e0d3553a62f0175742c0d60"
+
+S = "${WORKDIR}/git"
+
+inherit autotools-brokensep
+
+# The definitions below are used to decrypt the srk password.
+# It is allowed to define the values in 3 forms: string, hex number and
+# the hybrid, e.g,
+# srk_dec_pw = "incendia"
+# srk_dec_pw = "\x69\x6e\x63\x65\x6e\x64\x69\x61"
+# srk_dec_pw = "\x1""nc""\x3""nd""\x1""a"
+#
+# Due to the limit of escape character, the hybrid must be written in
+# above style. The actual values defined below in C code style are:
+# srk_dec_pw[] = { 0x01, 'n', 'c', 0x03, 'n', 'd', 0x01, 'a' };
+# srk_dec_salt[] = { 'r', 0x00, 0x00, 't' };
+srk_dec_pw ?= "\\"\\\x1\\"\\"nc\\"\\"\\\x3\\"\\"nd\\"\\"\\\x1\\"\\"a\\""
+srk_dec_salt ?= "\\"r\\"\\"\\\x00\\\x00\\"\\"t\\""
+
+CFLAGS_append += "-DSRK_DEC_PW=${srk_dec_pw} -DSRK_DEC_SALT=${srk_dec_salt}"
+
+# Uncomment below line if using the plain srk password for development
+#CFLAGS_append += "-DTPM_SRK_PLAIN_PW"
+
+do_configure_prepend() {
+ cd "${S}"
+ cp LICENSE COPYING
+ touch NEWS AUTHORS ChangeLog
+}
+
+do_install_append() {
+ install -m 0755 -d "${D}${libdir}/engines"
+ install -m 0755 -d "${D}${prefix}/local/ssl/lib/engines"
+ install -m 0755 -d "${D}${libdir}/ssl/engines"
+
+ cp -f "${D}${libdir}/openssl/engines/libtpm.so.0.0.0" "${D}${libdir}/libtpm.so.0"
+ cp -f "${D}${libdir}/openssl/engines/libtpm.so.0.0.0" "${D}${libdir}/engines/libtpm.so"
+ cp -f "${D}${libdir}/openssl/engines/libtpm.so.0.0.0" "${D}${prefix}/local/ssl/lib/engines/libtpm.so"
+ mv -f "${D}${libdir}/openssl/engines/libtpm.so.0.0.0" "${D}${libdir}/ssl/engines/libtpm.so"
+ mv -f "${D}${libdir}/openssl/engines/libtpm.la" "${D}${libdir}/ssl/engines/libtpm.la"
+ rm -rf "${D}${libdir}/openssl"
+}
+
+FILES_${PN}-staticdev += "${libdir}/ssl/engines/libtpm.la"
+FILES_${PN}-dbg += "\
+ ${libdir}/ssl/engines/.debug \
+ ${libdir}/engines/.debug \
+ ${prefix}/local/ssl/lib/engines/.debug \
+"
+FILES_${PN} += "\
+ ${libdir}/ssl/engines/libtpm.so* \
+ ${libdir}/engines/libtpm.so* \
+ ${libdir}/libtpm.so* \
+ ${prefix}/local/ssl/lib/engines/libtpm.so* \
+"
+
+RDEPENDS_${PN} += "libcrypto libtspi"
+
+INSANE_SKIP_${PN} = "libdir"
+INSANE_SKIP_${PN}-dbg = "libdir"
--
2.7.4


[meta-security][PATCH 3/4] tpm2-abrmd: add package

Armin Kuster
 

Signed-off-by: Armin Kuster <akuster808@gmail.com>
---
.../tpm2-abrmd/files/tpm2-abrmd-init.sh | 65 ++++++++++++++++++++++
.../tpm2-abrmd/files/tpm2-abrmd.default | 1 +
.../recipes-tpm/tpm2-abrmd/tpm2-abrmd_1.1.1.bb | 54 ++++++++++++++++++
3 files changed, 120 insertions(+)
create mode 100644 meta-tpm/recipes-tpm/tpm2-abrmd/files/tpm2-abrmd-init.sh
create mode 100644 meta-tpm/recipes-tpm/tpm2-abrmd/files/tpm2-abrmd.default
create mode 100644 meta-tpm/recipes-tpm/tpm2-abrmd/tpm2-abrmd_1.1.1.bb

diff --git a/meta-tpm/recipes-tpm/tpm2-abrmd/files/tpm2-abrmd-init.sh b/meta-tpm/recipes-tpm/tpm2-abrmd/files/tpm2-abrmd-init.sh
new file mode 100644
index 0000000..c8dfb7d
--- /dev/null
+++ b/meta-tpm/recipes-tpm/tpm2-abrmd/files/tpm2-abrmd-init.sh
@@ -0,0 +1,65 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides: tpm2-abrmd
+# Required-Start: $local_fs $remote_fs $network
+# Required-Stop: $local_fs $remote_fs $network
+# Should-Start:
+# Should-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: starts tpm2-abrmd
+# Description: tpm2-abrmd implements the TCG resource manager
+### END INIT INFO
+
+PATH=/sbin:/bin:/usr/sbin:/usr/bin
+DAEMON=/usr/sbin/tpm2-abrmd
+NAME=tpm2-abrmd
+DESC="TCG TSS2 Access Broker and Resource Management daemon"
+USER="tss"
+
+test -x "${DAEMON}" || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+case "${1}" in
+ start)
+ echo -n "Starting $DESC: "
+
+ if [ ! -e /dev/tpm* ]
+ then
+ echo "device driver not loaded, skipping."
+ exit 0
+ fi
+
+ start-stop-daemon --start --quiet --oknodo --background --pidfile /var/run/${NAME}.pid --user ${USER} --chuid ${USER} --exec ${DAEMON} -- ${DAEMON_OPTS}
+ RETVAL="$?"
+ echo "$NAME."
+ [ "$RETVAL" = 0 ] && pidof $DAEMON > /var/run/${NAME}.pid
+ exit $RETVAL
+ ;;
+
+ stop)
+ echo -n "Stopping $DESC: "
+
+ start-stop-daemon --stop --quiet --oknodo --pidfile /var/run/${NAME}.pid --user ${USER} --exec ${DAEMON}
+ RETVAL="$?"
+ echo "$NAME."
+ rm -f /var/run/${NAME}.pid
+ exit $RETVAL
+ ;;
+
+ restart|force-reload)
+ "${0}" stop
+ sleep 1
+ "${0}" start
+ exit $?
+ ;;
+ *)
+ echo "Usage: ${NAME} {start|stop|restart|force-reload|status}" >&2
+ exit 3
+ ;;
+esac
+
+exit 0
diff --git a/meta-tpm/recipes-tpm/tpm2-abrmd/files/tpm2-abrmd.default b/meta-tpm/recipes-tpm/tpm2-abrmd/files/tpm2-abrmd.default
new file mode 100644
index 0000000..987978a
--- /dev/null
+++ b/meta-tpm/recipes-tpm/tpm2-abrmd/files/tpm2-abrmd.default
@@ -0,0 +1 @@
+DAEMON_OPTS="--tcti=device --logger=syslog --max-connections=20 --max-transient-objects=20 --fail-on-loaded-trans"
diff --git a/meta-tpm/recipes-tpm/tpm2-abrmd/tpm2-abrmd_1.1.1.bb b/meta-tpm/recipes-tpm/tpm2-abrmd/tpm2-abrmd_1.1.1.bb
new file mode 100644
index 0000000..27e2408
--- /dev/null
+++ b/meta-tpm/recipes-tpm/tpm2-abrmd/tpm2-abrmd_1.1.1.bb
@@ -0,0 +1,54 @@
+SUMMARY = "TPM2 Access Broker & Resource Manager"
+DESCRIPTION = "This is a system daemon implementing the TPM2 access \
+broker (TAB) & Resource Manager (RM) spec from the TCG. The daemon (tpm2-abrmd) \
+is implemented using Glib and the GObject system. In this documentation and \
+in the code we use `tpm2-abrmd` and `tabrmd` interchangeably. \
+"
+SECTION = "security/tpm"
+
+LICENSE = "BSD-2-Clause"
+LIC_FILES_CHKSUM = "file://${S}/LICENSE;md5=500b2e742befc3da00684d8a1d5fd9da"
+
+DEPENDS += "autoconf-archive dbus glib-2.0 pkgconfig tpm2.0-tss glib-2.0-native"
+
+SRC_URI = "\
+ git://github.com/01org/tpm2-abrmd.git \
+ file://tpm2-abrmd-init.sh \
+ file://tpm2-abrmd.default \
+"
+SRCREV = "c2ccda956bf15165770682dd5c578c58ee5fa6e2"
+
+S = "${WORKDIR}/git"
+
+inherit autotools pkgconfig systemd update-rc.d useradd
+
+SYSTEMD_PACKAGES += "${PN}"
+SYSTEMD_SERVICE_${PN} = "tpm2-abrmd.service"
+SYSTEMD_AUTO_ENABLE_${PN} = "disable"
+
+INITSCRIPT_NAME = "${PN}"
+INITSCRIPT_PARAMS = "start 99 2 3 4 5 . stop 19 0 1 6 ."
+
+USERADD_PACKAGES = "${PN}"
+GROUPADD_PARAM_${PN} = "tss"
+USERADD_PARAM_${PN} = "--system -M -d /var/lib/tpm -s /bin/false -g tss tss"
+
+PACKAGECONFIG ?="udev"
+PACKAGECONFIG += "${@bb.utils.contains('DISTRO_FEATURES','systemd','systemd', '', d)}"
+
+PACKAGECONFIG[systemd] = "--with-systemdsystemunitdir=${systemd_system_unitdir}, --with-systemdsystemunitdir=no"
+PACKAGECONFIG[udev] = "--with-udevrulesdir=${sysconfdir}/udev/rules.d, --without-udevrulesdir"
+
+do_install_append() {
+ install -d "${D}${sysconfdir}/init.d"
+ install -m 0755 "${WORKDIR}/tpm2-abrmd-init.sh" "${D}${sysconfdir}/init.d/tpm2-abrmd"
+
+ install -d "${D}${sysconfdir}/default"
+ install -m 0644 "${WORKDIR}/tpm2-abrmd.default" "${D}${sysconfdir}/default/tpm2-abrmd"
+}
+
+FILES_${PN} += "${libdir}/systemd/system-preset"
+
+RDEPENDS_${PN} += "libgcc dbus-glib libtss2 libtctidevice libtctisocket"
+
+BBCLASSEXTEND = "native"
--
2.7.4


[meta-security][PATCH 2/4] tpm-quote-tools: Add package

Armin Kuster
 

Signed-off-by: Armin Kuster <akuster808@gmail.com>
---
.../tpm-quote-tools/tpm-quote-tools_1.0.4.bb | 23 ++++++++++++++++++++++
1 file changed, 23 insertions(+)
create mode 100644 meta-tpm/recipes-tpm/tpm-quote-tools/tpm-quote-tools_1.0.4.bb

diff --git a/meta-tpm/recipes-tpm/tpm-quote-tools/tpm-quote-tools_1.0.4.bb b/meta-tpm/recipes-tpm/tpm-quote-tools/tpm-quote-tools_1.0.4.bb
new file mode 100644
index 0000000..8486d00
--- /dev/null
+++ b/meta-tpm/recipes-tpm/tpm-quote-tools/tpm-quote-tools_1.0.4.bb
@@ -0,0 +1,23 @@
+SUMMARY = "The TPM Quote Tools is a collection of programs that provide support \
+ for TPM based attestation using the TPM quote mechanism. \
+ "
+DESCRIPTION = "The TPM Quote Tools is a collection of programs that provide support \
+ for TPM based attestation using the TPM quote mechanism. The manual \
+ page for tpm_quote_tools provides a usage overview. \
+ \
+ TPM Quote Tools has been tested with TrouSerS on Linux and NTRU on \
+ Windows XP. It was ported to Windows using MinGW and MSYS. \
+ "
+HOMEPAGE = "https://sourceforge.net/projects/tpmquotetools/"
+SECTION = "security/tpm"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://COPYING;md5=8ec30b01163d242ecf07d9cd84e3611f"
+
+DEPENDS = "libtspi tpm-tools"
+
+SRC_URI = "${SOURCEFORGE_MIRROR}/tpmquotetools/${PV}/${BP}.tar.gz"
+
+SRC_URI[md5sum] = "6e194f5bc534301bbaef53dc6d22c233"
+SRC_URI[sha256sum] = "10dc4eade02635557a9496b388360844cd18e7864e2eb882f5e45ab2fa405ae2"
+
+inherit autotools
--
2.7.4


[meta-security][PATCH 1/4] pcr-extend: add new package

Armin Kuster
 

Signed-off-by: Armin Kuster <akuster808@gmail.com>
---
meta-tpm/recipes-tpm/pcr-extend/pcr-extend_git.bb | 25 +++++++++++++++++++++++
1 file changed, 25 insertions(+)
create mode 100644 meta-tpm/recipes-tpm/pcr-extend/pcr-extend_git.bb

diff --git a/meta-tpm/recipes-tpm/pcr-extend/pcr-extend_git.bb b/meta-tpm/recipes-tpm/pcr-extend/pcr-extend_git.bb
new file mode 100644
index 0000000..0cc4f63
--- /dev/null
+++ b/meta-tpm/recipes-tpm/pcr-extend/pcr-extend_git.bb
@@ -0,0 +1,25 @@
+SUMMARY = "Command line utility to extend hash of arbitrary data into a TPMs PCR."
+HOMEPAGE = "https://github.com/flihp/pcr-extend"
+SECTION = "security/tpm"
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
+
+DEPENDS = "libtspi"
+
+PV = "0.1+git${SRCPV}"
+SRCREV = "c02ad8f628b3d99f6d4c087b402fe31a40ee6316"
+
+SRC_URI = "git://github.com/flihp/pcr-extend.git "
+
+inherit autotools
+
+S = "${WORKDIR}/git"
+
+do_compile() {
+ oe_runmake -C ${S}/src
+}
+
+do_install() {
+ install -d ${D}${bindir}
+ oe_runmake -C ${S}/src DESTDIR="${D}" install
+}
--
2.7.4


Re: u-boot recipe: Missing dependencies

Eric Schwarz
 

Hi Ross,

DEPENDS += "bc-native dtc-native"
+DEPENDS_append = " python-native"
+DEPENDS_append_x86-64 = " iasl-native swig-native"
Don't _append when you can just extend the assignment above.
I just did it that way for the moment since I wanted to circumvent merge
conflicts when I upgrade the underlying recipes from openembedded.

Depending on python-native does nothing as the binary isn't in $PATH still.
I thought any Yocto native binaries built are in the PATH automatically. How do
I fix this / do it correctly?
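(One possible fix, for reference: a minimal, untested sketch assuming the
morty-era Python 2 tooling. Instead of only adding python-native to DEPENDS,
inherit the oe-core pythonnative class, which pulls in python-native and, if
memory serves, also puts its directory on the native search path via
EXTRANATIVEPATH, e.g. in a bbappend:

    inherit pythonnative

    DEPENDS += "bc-native dtc-native"
    DEPENDS_append_x86-64 = " iasl-native swig-native"

Check pythonnative.bbclass in your oe-core revision for the exact behaviour.)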

Can you provide a configuration to make U-Boot build on x86 for testing?
I put now some patches in ordered manner on top of the original recipes from
openembedded.
However, before sending I would like to fix two issues so the recipes can go
into mainline immediately.

1.) I am working w/ -morty Yocto branch at the moment. The 'u-boot.inc' from
openembedded does not work w/ -morty.
I get the following error "AttributeError: module 'bb.utils' has no
attribute 'filter'". Building w/ the original -morty 'u-boot.inc' works.
2.) There is an issue w/ the U-Boot build system that it needs the "libgcc.a"
from the local host system [1]. I circumvented this by checking in my
"libgcc.a" and patching the U-Boot makefile accordingly. This must be
fixed in U-boot.

[1]... https://www.mail-archive.com/yocto@yoctoproject.org/msg36721.html

Cheers
Eric


Solved : Re: Problems building U-Boot for x86_64

Ferry Toth <ftoth@...>
 

On Fri, 25 Aug 2017 22:30:42 +0000, Ferry Toth wrote:

On Fri, 25 Aug 2017 21:22:40 +0200, Ferry Toth wrote:

Khem Raj wrote:




On 8/23/17 3:40 PM, Ferry Toth wrote:
On Wed, 23 Aug 2017 14:51:55 -0700, Khem Raj wrote:

On 8/23/17 2:29 PM, Ferry Toth wrote:
Ferry Toth wrote:

Khem Raj wrote:

On 8/22/17 11:41 PM, Ferry Toth wrote:
I am having trouble building a specific U-Boot version with
Yocto. Outside of Yocto on 64 bit Ubuntu 17.04 with multilib it
builds fine.

I am extending meta-intel-edison to build a 64-bit Poky Morty,
with a vanilla 64-bit kernel (4.12). This is working quite well.

My host is x86_64, the target is core2 with tune=core-64.

Without the 64-bit tune I can build U-Boot fine. With 64-bit it can
not link, apparently because it needs libgcc.a
what is the exact error message? Is it while compiling host bits or
target bits?
The failing line is:
x86_64-poky-linux-ld.bfd -Bsymbolic -Bsymbolic-functions -m elf_i386
--emit-relocs --wrap=__divdi3 --wrap=__udivdi3 --wrap=__moddi3
--wrap=__umoddi3 --gc-sections -pie -Bstatic --no-dynamic-linker
-Ttext 0x01101000 -o u-boot -T u-boot.lds arch/x86/cpu/start.o
--start-group arch/x86/cpu/built-in.o arch/x86/lib/built-in.o
board/intel/edison/built-in.o cmd/built-in.o common/built-in.o
disk/built-in.o drivers/built-in.o drivers/dma/built-in.o
drivers/gpio/built-in.o drivers/i2c/built-in.o drivers/mmc/built-in.o
drivers/mtd/built-in.o drivers/mtd/onenand/built-in.o
drivers/mtd/spi/built-in.o drivers/net/built-in.o
drivers/net/phy/built-in.o drivers/pci/built-in.o
drivers/power/built-in.o drivers/power/battery/built-in.o
drivers/power/domain/built-in.o drivers/power/fuel_gauge/built-in.o
drivers/power/mfd/built-in.o drivers/power/pmic/built-in.o
drivers/power/regulator/built-in.o drivers/serial/built-in.o
drivers/spi/built-in.o drivers/usb/common/built-in.o
drivers/usb/dwc3/built-in.o drivers/usb/emul/built-in.o
drivers/usb/eth/built-in.o drivers/usb/gadget/built-in.o
drivers/usb/gadget/udc/built-in.o drivers/usb/host/built-in.o
drivers/usb/musb-new/built-in.o drivers/usb/musb/built-in.o
drivers/usb/phy/built-in.o drivers/usb/ulpi/built-in.o dts/built-in.o
fs/built-in.o lib/built-in.o net/built-in.o test/built-in.o
test/dm/built-in.o --end-group arch/x86/lib/lib.a -Map u-boot.map

ERROR: oe_runmake failed
arch/x86/lib/built-in.o: In function `__wrap___udivdi3':
/home/ferry/tmp/edison-intel/my/edison-morty/out/linux64/build/tmp/work/edison-poky-linux/u-boot/edison-v2017.03-r0/git/arch/x86/lib/gcc.c:25: undefined reference to `__normal___udivdi3'
I believe the missing lib is libgcc.a. I just searched my sysroot and
found it here:
the linker cmdline above does not link with libgcc and there might
be a good reason for that; many standalone applications don't link
with libgcc intentionally. You could look into the code and see if
it can be written differently such that gcc does not have to invoke
a helper function from gcc runtime. Another option is to link with
libgcc explicitly
If I change my setup to build for a 32-bit target, it builds u-boot
without error.
The compiler may not be generating calls for the missing function.

When I build the same git outside yocto on 64bit with multilib
installed it also builds without error. In that case the make command
would be: make -j8 edison_defconfig
The same is possible. Can you do readelf -sW gcc.o and see if there is an
undefined reference to __normal___udivdi3?
20: 00000000    22 FUNC    GLOBAL HIDDEN     4 __wrap___divdi3
21: 00000000     0 NOTYPE  GLOBAL DEFAULT  UND __normal___divdi3
22: 00000000    22 FUNC    GLOBAL HIDDEN     6 __wrap___udivdi3
23: 00000000     0 NOTYPE  GLOBAL DEFAULT  UND __normal___udivdi3
24: 00000000    22 FUNC    GLOBAL HIDDEN     8 __wrap___moddi3
25: 00000000     0 NOTYPE  GLOBAL DEFAULT  UND __normal___moddi3
26: 00000000    22 FUNC    GLOBAL HIDDEN    10 __wrap___umoddi3
27: 00000000     0 NOTYPE  GLOBAL DEFAULT  UND __normal___umoddi3
AFAICT, when building for a 32-bit target the only difference is that the U-Boot makefile is
called with CROSS_COMPILE=i686-poky-linux- CC=i686-poky-linux-gcc
instead of CROSS_COMPILE=x86_64-poky-linux- CC=x86_64-poky-linux-gcc.

Result from readelf -sW gcc.o built with i686:

20: 00000000    27 FUNC    GLOBAL HIDDEN     4 __wrap___divdi3
21: 00000000     0 NOTYPE  GLOBAL DEFAULT  UND __normal___divdi3
22: 00000000    27 FUNC    GLOBAL HIDDEN     6 __wrap___udivdi3
23: 00000000     0 NOTYPE  GLOBAL DEFAULT  UND __normal___udivdi3
24: 00000000    27 FUNC    GLOBAL HIDDEN     8 __wrap___moddi3
25: 00000000     0 NOTYPE  GLOBAL DEFAULT  UND __normal___moddi3
26: 00000000    27 FUNC    GLOBAL HIDDEN    10 __wrap___umoddi3
27: 00000000     0 NOTYPE  GLOBAL DEFAULT  UND __normal___umoddi3

The path to libs and incs is the same (x86_64...) and I can't find any
libgcc.a there. Still it links in this case.

Maybe in the 64-bit case I need to add i686-poky-linux-gcc to native and
modify the recipe to use that?


My conclusion: I have some bb variable set to the wrong value or I
need to get multilib installed into /..../sysroots/x86_64-linux/lib.

So how do I do that?

sysroots/lib32-edison/usr/lib/i686-pokymllib32-linux/6.2.0/
sysroots/lib32-edison-tcbootstrap/usr/lib/i686-pokymllib32-linux/6.2.0/
sysroots/edison/usr/lib64/x86_64-poky-linux/6.2.0/
sysroots/edison-tcbootstrap/usr/lib64/x86_64-poky-linux/6.2.0/

The compile log shows:
NOTE: make -j8 CROSS_COMPILE=x86_64-poky-linux-
CC=x86_64-poky-linux-gcc --sysroot=/..../sysroots/edison V=1
HOSTCC=gcc -isystem/..../sysroots/x86_64-linux/usr/include -O2
-pipe -L/..../sysroots/x86_64-linux/usr/lib
-L/..../sysroots/x86_64-linux/lib
-Wl,-rpath-link,/..../sysroots/x86_64-linux/usr/lib
-Wl,-rpath-link,/..../sysroots/x86_64-linux/lib
-Wl,-rpath,/..../sysroots/x86_64-linux/usr/lib
-Wl,-rpath,/..../sysroots/x86_64-linux/lib -Wl,-O1 -C
/..../out/linux64/build/tmp/work/edison-poky-linux/u-boot/edison-v2017.03-r0/git
O=/..../out/linux64/build/tmp/work/edison-poky-linux/u-boot/edison-v2017.03-r0/build edison_defconfig

(.... my edits to shorten the uninteresting part of the path)

I would think: --sysroot points to /edison dir which actually
contains libgcc.a, but the -isystem, -L and -Wl options point to host dirs
that don't have the lib.


I attempted to add multilib; although that immediately exposed bugs
in other recipes, it does add libgcc.a, but only for the target
sysroot.

And for some reason, U-Boot is built with the native gcc
(x86_64-linux),
and multilib does not add libgcc.a to that sysroot.

So, how do I add multilib to -native sysroot, preferably only to
-native and not to the target, as the target has no further use
for it?

The strangest thing is that in u-boot.inc there is:
EXTRA_OEMAKE = 'CROSS_COMPILE=${TARGET_PREFIX}
CC="${TARGET_PREFIX}gcc ${TOOLCHAIN_OPTIONS}" V=1'
EXTRA_OEMAKE += 'HOSTCC="${BUILD_CC} ${BUILD_CFLAGS}
${BUILD_LDFLAGS}"'

But when I check my log file:
NOTE: make -j8 CROSS_COMPILE=x86_64-poky-linux-
CC=x86_64-poky-linux- gcc ......

So TARGET_PREFIX resolves to x86_64-poky-linux, but I think my
target is core2_64 (or something like that). Is that normal for
U-Boot?
thats ok.


I am a little lost, so any help would be greatly appreciated
I added multilib to the meta-intel-edison-bsp machine conf:
#multilib
require conf/multilib.conf
MULTILIBS = "multilib:lib32"
DEFAULTTUNE_virtclass-multilib-lib32 = "core2-32"
IMAGE_INSTALL_append = " lib32-libgcc"

This exposed a lot of recipe bugs in other places that needed to be fixed
first.

Then I changed IMAGE_INSTALL += "u-boot" to "lib32-u-boot". Now it builds
without error.

Now I changed IMAGE_INSTALL to EXTRA_IMAGEDEPENDS. I believe that will
prevent u-boot and the otherwise unnecessary multilib from being installed on
the image. Not sure if that really works out as I hope.

All of this now causes populate_sdk to fail, but I will post that
separately.

Thanks all for the useful comments.

Ferry

--



Re: pkg-config search directories

eliya.mir@gmail.com <eliya.mir@...>
 

Hi Ross, 
I am still trying to configure pkg-config. 

Running the bitbake glmark2 recipe results in an error:
Checking for 'gl'                        : not found.

Looking into the log file:
Checking for 'gl'
['/home/wzbwjj/vpm/GR_Yocto/build/tmp/sysroots/x86_64-linux/usr/bin/pkg-config', 'gl', '--cflags', '--libs']
err: Package gl was not found in the pkg-config search path.
Perhaps you should add the directory containing `gl.pc'
to the PKG_CONFIG_PATH environment variable
No package 'gl' found

not found
from /home/wzbwjj/vpm/GR_Yocto/build/tmp/work/corei7-64-poky-linux/glmark2/2014.03+AUTOINC+fa71af2dfa-r0/git: The configuration failed


Thanks!



On Tue, Oct 3, 2017 at 6:40 PM, Burton, Ross <ross.burton@...> wrote:
On 3 October 2017 at 16:36, eliya.mir@... <eliya.mir@...> wrote:
Thanks Ross, 
That is the issue: the default pkg-config does not search host paths. How do I edit its search paths?

"If you *really* want to link against host binaries and not building your own native recipes for it then see qemu.inc for an example of how to steal the host pkg-config path and use it at configure time."

Ross
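For reference, the qemu.inc trick mentioned above boils down to asking the
build host's pkg-config for its own search path and appending it at configure
time. A rough, untested sketch of that approach (the function name and host
paths here are assumptions; check the actual qemu.inc in your oe-core release
for the exact form):

    do_configure_prepend() {
        # Ask the host pkg-config (not the sysroot one) where its .pc files live
        BHOST_PKGCONFIG_PATH=$(PATH=/usr/bin:/bin pkg-config --variable pc_path pkg-config || echo "")
        if [ ! -z "$BHOST_PKGCONFIG_PATH" ]; then
            export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$BHOST_PKGCONFIG_PATH
        fi
    }

As Ross notes, linking a cross-compiled recipe against host libraries is rarely
what you want; for glmark2 the usual route is to have your BSP provide
virtual/libgl (e.g. mesa) so a target gl.pc ends up in the recipe sysroot.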


Re: [meta-rockchip] The various rockchip layers

Mirza Krak <mirza.krak@...>
 

2017-10-08 5:39 GMT+02:00 Trevor Woerner <twoerner@gmail.com>:
On Sat, Oct 7, 2017 at 7:03 PM, Mirza Krak <mirza.krak@gmail.com> wrote:
Thank you for taking the time and explaining the current and previous
state. It is highly appreciated.


As I started digging to check the current state of the different
layers it was quite clear to me that there where two different sets.
One is maintained by Rockchip [1] and the other one by the community
[2].
Don't forget https://github.com/jackmitch/meta-tinker ;-)
Yeah, noticed that one. Neat little layer that the meta-rockchip
should try and consume :).


And it made sense to me initially. I do not know the full background
story with the Rockchip layers (would be nice if someone could tell it
:)) on what the intent was with "community" Rockchip layers.
Romain started meta-rockchip initially, then I joined, then people
from Rockchip joined later.
Ok, that explains it a bit.


But as I looked into it further it was quite clear to me that the
Rockchip maintained layers are more "up to date" than the community
ones. And then I started thinking about why these are not merged and we
could focus effort on maintaining one layer.
The main goal Romain and I have is to have a meta-rockchip that helps
users run upstream code on their rockchip devices. My guess is that
the main goal of the Rockchip meta-rockchip is to demonstrate the
performance of the rockchip SoC (usually via vendor kernels, vendor
bootloaders, binary blobs, etc.)

There are a couple things that are interesting:

- The Rockchip maintained layers state that they do accept
contributions through pull-requests on github. So nothing stopping us
there?
They are quite friendly, but they have their goals.

- The Rockchip maintained layers support more "community" boards than
the community layers do. Bit odd? :)
The rockchip people are paid to maintain meta-rockchip as part of
their day-jobs, Romain and I aren't. I buy my own boards, I haven't
received any hardware support, so my contributions tend to focus on
boards I actually have. I would rather have support for boards I can
actually test and therefore actually have rather than guessing whether
stuff will work. Not to mention I have to find time to work on this
after other "more important" things are done :-)
That is of course fully understandable.

- The community layers are a bit outdated on older Yocto branches,
master branch seems active though.
Mostly a time issue. I build master with firefly-rk3288 every night
with all the latest master updates and try to fix any issues that come
up. I don't have the resources, unfortunately, to keep my finger on
various past releases.
Also understandable.


- Trevor and Romain (maintainers of the community layers) are listed
as maintainers of the Rockchip layers? [4]
Initially the Rockchip people would send pull requests for the one
meta-rockchip layer. Many of those pull requests were to add recipes
pointing to vendor kernels/bootloaders and binary blobs. Also they
would send patches for boards that (at those times) weren't available
or sometimes weren't even announced! We pushed back on some of the
contributions, not just for philosophical reasons but sometimes for
technical reasons as well. They weren't happy with our slow response
times and push-back so they just forked and went on their own way.
When they forked they forgot to change some of the boilerplate stuff
that should have been changed (such as the maintainers). So yes,
Romain and I are listed as the maintainers of the Rockchip
meta-rockchip layer, but we're not :-)
This explains a lot thanks.

It's on my TODO list to send them some patches for things like that :-)

What I am really after is better understanding the workflow working
with Rockchip SOC`s and Yocto and that is why I am raising questions
to do so :).

My plan was getting involved in one of the Rockchip layers as I have
some improvements and I have some ideas for further improvements. And
the goal with this email was to figure out where.
Every once in a while I try to carve out some time to try the Rockchip
meta-rockchip layer and see how things are going. Maybe it's just a
coincidence, but often they don't build for me. Looking through their
instructions they seem to want lots of control over a user's
setup/configuration (i.e. by using repo to pull specific versions of a
specific set of layers, then using their own setup tool). My goal is
to have a layer that works any way the user wants to work (e.g.
distroless with openembedded-core, or with poky, or with angstrom,
etc...). When I use their instructions I do have more success (but not
always), but I don't believe that's how BSP layers should work. I
don't think it's a good situation when a user must use a specific
distro, or specific layers at specific commit points, or a specific
setup tool in order to build for a MACHINE successfully.
I actually was recently in the situation of choosing one of the
meta-rockchip layers and I ended up with the Rockchip one, mostly
since it had Tinkerboard support and a 4.4 vendor kernel. It built for
me at least :).

I am not totally against using repo and a manifest to help users set up a
simple "demo" (and it is quite common to use this), since a BSP layer
does not really provide you with anything cool besides booting to a
console. Providing a demo layer (it is quite common to demo X11,
Wayland and Qt, for example) makes it easier to get started
with development. But the BSP layer should of course work standalone.


I'm hoping that one day
https://bugzilla.yoctoproject.org/show_bug.cgi?id=11881 will get
accepted. If/When that happens then it will be theoretically possible
to have both a set of upstream recipes and a set of vendor recipes
within the same BSP layer. Maybe at that point the two (three?) layers
can come together. Unfortunately there doesn't seem to be much
interest in BSP layers outside of the BSP community. I'll probably
just have to add the support to the layer itself in order to gain this
functionality (as do the FSL layers, which is where this idea
originates).
This is certainly an interesting change but IMO not required to have
multiple u-boots, Linux kernels etc. The Freescale layer is by far
more complex (due to the number of boards/SoCs supported) and it makes
sense there; most other BSP layers maybe do not have that kind of
complexity and not the same need, I guess.


I am hoping for a better situation in the future. I'm glad for any
suggestions, patches, testing, etc. For example, the meta-rockchip I
help maintain still uses a vendor u-boot although I've been told the
upstream should work fine; I just haven't had the time to investigate.
Also, I'd like to add a linux-stable recipe for 4.13 (similar to
meta-odroid) but I can't seem to get the defconfig right. Also, I have
a firefly-rk3399 and a tinker-rk3288, but haven't had the time to get
anything into a state I can push publicly.
It is quite obvious to me that any attempted efforts should be
directed to the community layers as this was once the base of the
Rockchip layers.

I do have some time and ambition to spend, so I will list some ideas here
so that they can be shot down before I start working on them (some
things might not align with your goals for the layer):

- Synchronize the Rockchip and community layers. There are some
goodies in the Rockchip layers that would be nice to pull in.
Especially the vendor 4.4 kernel with GPU support and accompanying
binary blobs (that should work with Qt, Wayland, X11); I have tested
X11 myself. Running mainline is nice but without the GPU support you
are really not utilizing the true power of the SoCs. And IMO this
should be the default kernel as it provides the most functionality.

- Bring in various boards from Rockchip to the community layer. I know
you wrote that you do not like to bring in boards that you can not test,
but you will never have all the boards :). When bringing in the
various boards we should also try and get people interested in
maintaining them; that could be one condition for bringing in boards that
you do not have.

- Bring in more vendor kernels (tinkerboard, phycore, firefly?) which
all have their own forks. Again mainline is nice but does not provide
the same functionality as the vendor kernels.

- Consolidate machine configurations. I started working on something
targeting the Rockchip layer here [2]

- Add proper extlinux support [1], instead of hardcoding it in the gpt
image classes (a rough sketch follows after this list).

- Move away from custom image classes to WIC, I am running this in a
custom layer and it works just fine.

- There seems to be a lot of "stale/dead" code in the community
rockchip layer. Old kernels and "petitboot" that might not be used
today? This is basically a clean-up task.

- I kinda like the meta-rockchip-extra layer to provide more "capable"
demo images for testing purposes.
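A rough sketch of the extlinux idea, using the uboot-extlinux-config.bbclass
from [1]. The variable names are from memory and the root/console values are
only placeholders for an rk3288-style board, so treat this as a hypothetical
machine-conf fragment rather than tested configuration:

    UBOOT_EXTLINUX = "1"
    UBOOT_EXTLINUX_ROOT = "root=/dev/mmcblk0p2"
    UBOOT_EXTLINUX_CONSOLE = "console=ttyS2,115200n8"
    UBOOT_EXTLINUX_FDTDIR = "../"
    UBOOT_EXTLINUX_KERNEL_IMAGE = "../${KERNEL_IMAGETYPE}"

The class generates extlinux.conf at build time, so the gpt image class would
only need to copy it into the boot partition instead of hardcoding the boot
commands.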

Also the ambition would be to try and get Rockchip involved again in
the community layer instead of forking, or at least to push changes to
the community layer if they want to keep the fork.

There is probably more but I will stop here for now :).


Never a dull moment :-D
We would not be doing this otherwise :).

[1]. https://git.yoctoproject.org/cgit.cgi/poky/plain/meta/classes/uboot-extlinux-config.bbclass
[2]. https://github.com/mirzak/meta-rockchip/commits/consolidate-machines

--
Best Regards
Mirza


Re: [meta-rockchip] The various rockchip layers

Trevor Woerner
 

On Sat, Oct 7, 2017 at 7:03 PM, Mirza Krak <mirza.krak@gmail.com> wrote:
As I started digging to check the current state of the different
layers it was quite clear to me that there were two different sets.
One is maintained by Rockchip [1] and the other one by the community
[2].
Don't forget https://github.com/jackmitch/meta-tinker ;-)

And it made sense to me initially. I do not know the full background
story with the Rockchip layers (would be nice if someone could tell it
:)) on what the intent was with "community" Rockchip layers.
Romain started meta-rockchip initially, then I joined, then people
from Rockchip joined later.

But as I looked into it further it was quite clear to me that the
Rockchip maintained layers are more "up to date" than the community
ones. And then I started thinking about why these are not merged and we
could focus effort on maintaining one layer.
The main goal Romain and I have is to have a meta-rockchip that helps
users run upstream code on their rockchip devices. My guess is that
the main goal of the Rockchip meta-rockchip is to demonstrate the
performance of the rockchip SoC (usually via vendor kernels, vendor
bootloaders, binary blobs, etc.)

There are a couple things that are interesting:

- The Rockchip maintained layers state that they do accept
contributions through pull-requests on github. So nothing stopping us
there?
They are quite friendly, but they have their goals.

- The Rockchip maintained layers support more "community" boards than
the community layers do. Bit odd? :)
The rockchip people are paid to maintain meta-rockchip as part of
their day-jobs, Romain and I aren't. I buy my own boards, I haven't
received any hardware support, so my contributions tend to focus on
boards I actually have. I would rather have support for boards I can
actually test and therefore actually have rather than guessing whether
stuff will work. Not to mention I have to find time to work on this
after other "more important" things are done :-)

- The community layers are a bit outdated on older Yocto branches,
master branch seems active though.
Mostly a time issue. I build master with firefly-rk3288 every night
with all the latest master updates and try to fix any issues that come
up. I don't have the resources, unfortunately, to keep my finger on
various past releases.

- Trevor and Romain (maintainers of the community layers) are listed
as maintainers of the Rockchip layers? [4]
Initially the Rockchip people would send pull requests for the one
meta-rockchip layer. Many of those pull requests were to add recipes
pointing to vendor kernels/bootloaders and binary blobs. Also they
would send patches for boards that (at those times) weren't available
or sometimes weren't even announced! We pushed back on some of the
contributions, not just for philosophical reasons but sometimes for
technical reasons as well. They weren't happy with our slow response
times and push-back so they just forked and went on their own way.
When they forked they forgot to change some of the boilerplate stuff
that should have been changed (such as the maintainers). So yes,
Romain and I are listed as the maintainers of the Rockchip
meta-rockchip layer, but we're not :-)

It's on my TODO list to send them some patches for things like that :-)

What I am really after is better understanding the workflow working
with Rockchip SOC`s and Yocto and that is why I am raising questions
to do so :).

My plan was getting involved in one of the Rockchip layers as I have
some improvements and I have some ideas for further improvements. And
the goal with this email was to figure out where.
Every once in a while I try to carve out some time to try the Rockchip
meta-rockchip layer and see how things are going. Maybe it's just a
coincidence, but often they don't build for me. Looking through their
instructions they seem to want lots of control over a user's
setup/configuration (i.e. by using repo to pull specific versions of a
specific set of layers, then using their own setup tool). My goal is
to have a layer that works any way the user wants to work (e.g.
distroless with openembedded-core, or with poky, or with angstrom,
etc...). When I use their instructions I do have more success (but not
always), but I don't believe that's how BSP layers should work. I
don't think it's a good situation when a user must use a specific
distro, or specific layers at specific commit points, or a specific
setup tool in order to build for a MACHINE successfully.

I'm hoping that one day
https://bugzilla.yoctoproject.org/show_bug.cgi?id=11881 will get
accepted. If/When that happens then it will be theoretically possible
to have both a set of upstream recipes and a set of vendor recipes
within the same BSP layer. Maybe at that point the two (three?) layers
can come together. Unfortunately there doesn't seem to be much
interest in BSP layers outside of the BSP community. I'll probably
just have to add the support to the layer itself in order to gain this
functionality (as do the FSL layers, which is where this idea
originates).

I am hoping for a better situation in the future. I'm glad for any
suggestions, patches, testing, etc. For example, the meta-rockchip I
help maintain still uses a vendor u-boot although I've been told the
upstream should work fine; I just haven't had the time to investigate.
Also, I'd like to add a linux-stable recipe for 4.13 (similar to
meta-odroid) but I can't seem to get the defconfig right. Also, I have
a firefly-rk3399 and a tinker-rk3288, but haven't had the time to get
anything into a state I can push publicly.

Never a dull moment :-D


Re: [meta-rockchip] The various rockchip layers

Khem Raj
 

On Sat, Oct 7, 2017 at 4:03 PM, Mirza Krak <mirza.krak@gmail.com> wrote:
Hi all.

I recently started working with Rockchip SoC`s and I currently have a
Tinkerboard and a FireFly-RK3288 board. And as I recently enter the
Yocto Rockchip world I have some comments and questions on the current
setup/workflow which I found a bit confusing when starting out.

As I started digging to check the current state of the different
layers it was quite clear to me that there were two different sets.
One is maintained by Rockchip [1] and the other one by the community
[2].

And it made sense to me initially. I do not know the full background
story with the Rockchip layers (would be nice if someone could tell it
:)) on what the intent was with "community" Rockchip layers.

But as I looked into it further it was quite clear to me that the
Rockchip maintained layers are more "up to date" than the community
ones. And then I started thinking about why these are not merged and we
could focus effort on maintaining one layer.

There are a couple things that are interesting:

- The Rockchip maintained layers state that they do accept
contributions through pull-requests on github. So nothing stopping us
there?

- The Rockchip maintained layers support more "community" boards than
the community layers do. Bit odd? :)

- The community layers are a bit outdated on older Yocto branches,
master branch seems active though.

- Trevor and Romain (maintainers of the community layers) are listed
as maintainers of the Rockchip layers? [4]
It's not a good situation. While it is good that both layers are maintained, each
should be clear in its purpose. It could be that the github layer supports products
and might have binary stuff and may not work with the latest upstreams, while the
community layer, though supporting a smaller number of boards, is kept up to date
with the respective upstreams. Ideally, it would be good if both layers were
in sync.


What I am really after is better understanding the workflow working
with Rockchip SOC`s and Yocto and that is why I am raising questions
to do so :).

My plan was getting involved in one of the Rockchip layers as I have
some improvements and I have some ideas for further improvements. And
the goal with this email was to figure out where.

[1]. https://github.com/rockchip-linux
[2]. http://git.yoctoproject.org/cgit/cgit.cgi/meta-rockchip/
[3]. http://freescale.github.io/doc/release-notes/2.2/#the-differences-between-project-name-and-freescale-release-name
[4]. https://github.com/rockchip-linux/meta-rockchip/blob/master/README#L65-L66
--
Best Regards
Mirza


[meta-security][PATCH 2/2] README: update with basic info

Armin Kuster
 

needed to pass yocto-check-layer

Signed-off-by: Armin Kuster <akuster808@gmail.com>
---
meta-tpm/README | 4 ++++
1 file changed, 4 insertions(+)

diff --git a/meta-tpm/README b/meta-tpm/README
index e69de29..bbc70bb 100644
--- a/meta-tpm/README
+++ b/meta-tpm/README
@@ -0,0 +1,4 @@
+meta-tpm layer
+==============
+
+This layer contains base TPM recipes.
--
2.7.4


[meta-security][PATCH 1/2] swtpm: fix cuse depends

Armin Kuster
 

If cuse is enabled, depend on fuse, which is in meta-filesystems.
Throw an error if the layer is missing.

Signed-off-by: Armin Kuster <akuster808@gmail.com>
---
meta-tpm/recipes-tpm/swtpm/swtpm_1.0.bb | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/meta-tpm/recipes-tpm/swtpm/swtpm_1.0.bb b/meta-tpm/recipes-tpm/swtpm/swtpm_1.0.bb
index 14f668b..952de1a 100644
--- a/meta-tpm/recipes-tpm/swtpm/swtpm_1.0.bb
+++ b/meta-tpm/recipes-tpm/swtpm/swtpm_1.0.bb
@@ -3,7 +3,7 @@ LICENSE = "BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENSE;md5=fe8092c832b71ef20dfe4c6d3decb3a8"
SECTION = "apps"

-DEPENDS = "libtasn1 fuse expect socat glib-2.0 libtpm libtpm-native"
+DEPENDS = "libtasn1 expect socat glib-2.0 libtpm libtpm-native"

# configure checks for the tools already during compilation and
# then swtpm_setup needs them at runtime
@@ -32,7 +32,7 @@ PACKAGECONFIG += "${@bb.utils.contains('DISTRO_FEATURES', 'selinux', 'selinux',
PACKAGECONFIG[openssl] = "--with-openssl, --without-openssl, openssl"
PACKAGECONFIG[gnutls] = "--with-gnutls, --without-gnutls, gnutls"
PACKAGECONFIG[selinux] = "--with-selinux, --without-selinux, libselinux"
-PACKAGECONFIG[cuse] = "--with-cuse, --without-cuse"
+PACKAGECONFIG[cuse] = "--with-cuse, --without-cuse, fuse"

EXTRA_OECONF += "--with-tss-user=${TSS_USER} --with-tss-group=${TSS_GROUP}"

@@ -55,3 +55,9 @@ USERADD_PARAM_${PN} = "--system -g ${TSS_GROUP} --home-dir \
RDEPENDS_${PN} = "libtpm expect socat bash"

BBCLASSEXTEND = "native nativesdk"
+
+python() {
+ if 'cuse' in d.getVar('PACKAGECONFIG') and \
+ 'filesystems-layer' not in d.getVar('BBFILE_COLLECTIONS').split():
+ raise bb.parse.SkipRecipe('Cuse enabled which requires meta-filesystems to be present.')
+}
--
2.7.4
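
For anyone enabling the cuse interface: the anonymous Python block above
skips the recipe unless the filesystems-layer collection (meta-filesystems)
is present. A minimal sketch of the configuration side, assuming
meta-filesystems has already been added to conf/bblayers.conf (pyro-era
override syntax):

    # local.conf sketch -- assumes meta-filesystems is in conf/bblayers.conf
    PACKAGECONFIG_append_pn-swtpm = " cuse"

With that in place, the cuse PACKAGECONFIG pulls in the fuse dependency
added by this patch.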


[meta-rockchip] The various rockchip layers

Mirza Krak <mirza.krak@...>
 

Hi all.

I recently started working with Rockchip SoCs and I currently have a
Tinkerboard and a FireFly-RK3288 board. As I have only recently entered
the Yocto Rockchip world, I have some comments and questions on the
current setup/workflow, which I found a bit confusing when starting out.

As I started digging to check the current state of the different
layers, it was quite clear to me that there were two different sets.
One is maintained by Rockchip [1] and the other one by the community
[2].

And it made sense to me initially. I do not know the full background
story of the Rockchip layers (it would be nice if someone could tell it
:)) or what the intent was with the "community" Rockchip layers.

But as I looked into it further it was quite clear to me that the
Rockchip-maintained layers are more "up to date" than the community
ones. And then I started wondering why these are not merged so we
could focus effort on maintaining one layer.

There are a couple of things that are interesting:

- The Rockchip-maintained layers state that they do accept
contributions through pull requests on GitHub. So nothing stopping us
there?

- The Rockchip-maintained layers support more "community" boards than
the community layers do. A bit odd? :)

- The community layers are a bit outdated on older Yocto branches,
though the master branch seems active.

- Trevor and Romain (maintainers of the community layers) are listed
as maintainers of the Rockchip layers? [4]

What I am really after is a better understanding of the workflow for
working with Rockchip SoCs and Yocto, and that is why I am raising
these questions :).

My plan was to get involved in one of the Rockchip layers, as I have
some improvements and some ideas for further improvements. The goal
of this email was to figure out where.

[1]. https://github.com/rockchip-linux
[2]. http://git.yoctoproject.org/cgit/cgit.cgi/meta-rockchip/
[3]. http://freescale.github.io/doc/release-notes/2.2/#the-differences-between-project-name-and-freescale-release-name
[4]. https://github.com/rockchip-linux/meta-rockchip/blob/master/README#L65-L66
--
Best Regards
Mirza


Re: [meta-raspberry-pi] needing newer or patched version of g++

Khem Raj
 

On Sat, Oct 7, 2017 at 9:38 AM, Bill Jenkins <bill@korgrd.com> wrote:

On Sep 15, 2017, at 8:02 AM, Bill Jenkins <bill@korgrd.com> wrote:


On Sep 15, 2017, at 7:43 AM, Khem Raj <raj.khem@gmail.com> wrote:

On Fri, Sep 15, 2017 at 7:35 AM, Bill Jenkins <bill@korgrd.com> wrote:

On Sep 15, 2017, at 6:54 AM, Khem Raj <raj.khem@gmail.com> wrote:

On Thu, Sep 14, 2017 at 10:14 PM, Bill Jenkins <bill@korgrd.com> wrote:
After creating an SDK for a 32-bit Raspberry Pi3 target, I ran into the following compiler error when
compiling an application using the SDK:

internal compiler error: Max. number of generated reload insns per insn is achieved (90)

It turns out that a patch was submitted for g++ early last year for the above problem,
We need to backport the patch to 6.3.0 and regenerate SDK. If you can
point to patch that will be helpful.
Thanks Khem, here's a link to the commit:

https://github.com/gcc-mirror/gcc/commit/4fe01ba94e99e792ebe9da2ccb3b071aa1bac388#diff-af18d9175d034b2b3726f1ddc05fae55
OK and which release are you on ?
I've been building in the pyro branch. DISTRO_VERSION reports 2.3.2.
I hadn't heard any news on this. Is there any more information required?
Will it be possible to backport that patch to gcc 6.3.0?
Posted now here
https://patchwork.openembedded.org/patch/144754/
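
For anyone needing the fix before it is merged, a sketch of how such a
backport could be carried in a custom layer (the file and patch names
below are hypothetical; in pyro the gcc recipes share their sources, so
one option is a bbappend on gcc-source):

    # recipes-devtools/gcc/gcc-source_6.3.bbappend  (hypothetical path)
    FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
    SRC_URI += "file://0001-backport-lra-reload-fix.patch"

The patch file would be the upstream commit linked above, adjusted to
apply cleanly to 6.3.0.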


Re: [meta-raspberry-pi] needing newer or patched version of g++

Bill J
 

On Sep 15, 2017, at 8:02 AM, Bill Jenkins <bill@korgrd.com> wrote:


On Sep 15, 2017, at 7:43 AM, Khem Raj <raj.khem@gmail.com> wrote:

On Fri, Sep 15, 2017 at 7:35 AM, Bill Jenkins <bill@korgrd.com> wrote:

On Sep 15, 2017, at 6:54 AM, Khem Raj <raj.khem@gmail.com> wrote:

On Thu, Sep 14, 2017 at 10:14 PM, Bill Jenkins <bill@korgrd.com> wrote:
After creating an SDK for a 32-bit Raspberry Pi3 target, I ran into the following compiler error when
compiling an application using the SDK:

internal compiler error: Max. number of generated reload insns per insn is achieved (90)

It turns out that a patch was submitted for g++ early last year for the above problem,
We need to backport the patch to 6.3.0 and regenerate SDK. If you can
point to patch that will be helpful.
Thanks Khem, here's a link to the commit:

https://github.com/gcc-mirror/gcc/commit/4fe01ba94e99e792ebe9da2ccb3b071aa1bac388#diff-af18d9175d034b2b3726f1ddc05fae55
OK and which release are you on ?
I've been building in the pyro branch. DISTRO_VERSION reports 2.3.2.
I hadn't heard any news on this. Is there any more information required?
Will it be possible to backport that patch to gcc 6.3.0?

Cheers,
Bill



but apparently that
patch is not in the 6.3.0 version within the SDK. When I try to specify a newer version
(by using PREFERRED_VERSION_gcc-cross-${TARGET_ARCH}), bitbake reports that only 5.4.0 or 6.3.0
are available. Any suggestions on how to resolve this? (i.e. is there some way to use a newer g++ or to
apply the patch?)

Thanks,
Bill


Missing /etc/resolv.conf

Paul D. DeRocco
 

(Yocto Pyro, using systemd configured with resolved, networkd, timedated,
timesyncd)

/etc/resolv.conf is supposed to be symlinked to
/run/systemd/resolve/resolv.conf, which gets updated with the DNS address
received from DHCP, but it's not. Looking through systemd_232.bb, I see
that do_install() uses sed to edit /usr/lib/tmpfiles.d/etc.conf to set up
this link. When I run the system, I see that etc.conf does indeed have
this edited line, but there is no /etc/resolv.conf.
systemd-tmpfiles-setup.service runs successfully. Isn't that what's
supposed to execute all the junk in /usr/lib/tmpfiles.d? All the
abovementioned servers are running.

Am I missing something?
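
For reference, the symlink is normally created by an 'L' (symlink) entry in
tmpfiles.d; after the recipe's sed edit, the relevant line in
/usr/lib/tmpfiles.d/etc.conf should look roughly like the sketch below (the
exact type modifier and target depend on the recipe):

    # /usr/lib/tmpfiles.d/etc.conf (excerpt, approximate)
    L! /etc/resolv.conf - - - - ../run/systemd/resolve/resolv.conf

Running 'systemd-tmpfiles --create /usr/lib/tmpfiles.d/etc.conf' by hand on
the target is one way to check whether that entry is applied; note that
entries marked with '!' are only processed when --boot is passed, as
systemd-tmpfiles-setup.service does.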

--

Ciao, Paul D. DeRocco
Paul mailto:pderocco@ix.netcom.com


FW: QA cycle report for 2.4 RC1

libertad
 

Hello All,

Enjoy viewing the full Report for 2.4 RC1:  https://wiki.yoctoproject.org/wiki/WW40_-_2017-10-06_-_Full_Test_Cycle_2.4_RC1

 

======= Summary ========

 

The QA cycle for release 2.4 RC1 is complete. There are 18 new bugs, of which 5 are high priority and targeted to be fixed in 2.4 M4. Four of the high-priority bugs (12143[1], 12144[2], 12145[3], 12146[4]) are ptest related and one (12194[5]) is Toaster related. An additional medium+ priority bug (12131[6], kernel traceback when rebooting Beaglebone) is targeted to be fixed by 2.4 M4. As an extra note, QA wants to highlight that there are a total of 46 bugs targeted to be fixed in 2.4 M4 (link [20]).

 

ptest

Results show that the python and e2fsprogs ptests are not executing, so bugs 12143[1] and 12146[4], respectively, were opened and given high priority.

Bash (bug 12145[3]) had a decrease in pass rate of 1.27% and busybox (bug 12144[2]) a decrease of 0.19%. Although they are high priority, QA considers the decrease not very significant, so these bugs can be lowered in priority.

 

Eclipse

Some good news: testing for Eclipse Oxygen was added to the QA test cycle for the first time.

 

Posix

2.4 RC1 POSIX results are very similar to the last QA cycle, 2.4 M3 RC1. They only show that Edgerouter had one more failure compared with the previous run.

 

                  Failures          2.4 RC1    2.4 M3 RC1
                  Genericx86-64     46         46
                  Beaglebone        45         45
                  Edgerouter        47         46
                  Mpc8315e-rdb      45         45

 

For more details visit:  https://wiki.yoctoproject.org/wiki/Posix_result and https://wiki.yoctoproject.org/wiki/POSIX-results

 

LTP

2.4 RC1 LTP results show that, for Genericx86-64 and Beaglebone, the failures increased by 1 test. On Edgerouter there is an increase of 2 failures compared with QA cycle 2.4 M3 RC1.

 

                                       2.4 RC1        |     2.4 M3 RC1
                                   Total | Failures   |   Total | Failures
               Genericx86-64        1391 |   27       |    1391 |   26
               Beaglebone           1353 |   18       |    1353 |   17
               Edgerouter           1353 |   34       |    1353 |   32
               Mpc8315e-rdb         1353 |   31       |    1353 |   31

 

For more details: https://wiki.yoctoproject.org/wiki/LTP_result

 

 

Performance

 

In general, the measurements were at the same levels as in 2.4 M3. The only noticeable variation was in "rootfs", which shows an increase of roughly 7% on Ubuntu and 6% on Fedora (see the tables below), although this does not represent an important regression.

 

Ubuntu                 Test      2.4 M3 RC1    2.4 RC1       %
                       sato      1:15:58       1:15:24    -0.75
                       rootfs    2:34          2:45        7.14
                       rmwork    1:08:15       1:08:19     0.10
                       kernel    5:36          5:41        1.49
                       eSDK      2:39          2:40        0.63

Fedora                 Test      2.4 M3 RC1    2.4 RC1       %
                       sato      1:14:13       1:17:02     3.80
                       rootfs    2:35          2:44        5.81
                       rmwork    1:09:06       1:10:25     1.91
                       kernel    5:58          6:04        1.68
                       eSDK      2:37          2:42        3.18

 

 

======= QA Hints ========

 

Because there are 5 high-priority bugs and 46 bugs targeted to be fixed by 2.4 M4, QA requests an RC2 so that all commits/fixes relevant to the 2.4 release are integrated.

 

======= Bugs ========

 

       New Bugs

            -12143[1] Ptest-runner does not run python, in 2.4rc1

            -12144[2] ptest-busybox cases failed in 2.4rc1

            -12145[3] ptest-bash cases failed in 2.4rc1

            -12146[4] ptest-e2fsprog cases failed in 2.4rc1

            -12194[5] Toaster server lost "toaster.conf" environment, user settings lost

            -12131[6] Kernel traceback when reboot beaglebone black Medium+

            -12170[7] Add mkfs.ext support to busybox on poky-tiny

            -12138[8] [master] the icon on the connection manager appears without connection in Eclipse Plugin 

            -12168[9] media player - must be warned that it is not possible to play MPEG-1 and MP3 formats

            -12150[10] bug for QA to create Testcases for this new test on testopia AUTO_eSDK_sdkext

            -12133[11] The search results for numbers are incorrect.

            -12140[12] The link and the x button from the search's result did not work as expected

            -12142[13] The color from the table from the project build changed when you use an special character.

            -12164[14] The field from time's task is empty

            -12171[15] After building an image, checking the packages there's nothing.

            -12175[16] [Test Case 946] View detailed configuration information for a build

            -12200[17] The button from New custom image is not working as expected

            -12201[18] The button from New custom image is letting you press it and it’s supposed to be disabled.

 

 

       Not new M+ bugs

            -11994[19]  Graphical qemu doesn’t work over remote X on Fedora 26

 

 

 

Full Bug Report: https://wiki.yoctoproject.org/wiki/WW40_-_2017-10-06_-_Full_Test_Cycle_2.4_RC1#Bugs_Found_during_QA_Test

 

 

======== Links =========

    1.  https://bugzilla.yoctoproject.org/show_bug.cgi?id=12143
    2.  https://bugzilla.yoctoproject.org/show_bug.cgi?id=12144
    3.  https://bugzilla.yoctoproject.org/show_bug.cgi?id=12145
    4.  https://bugzilla.yoctoproject.org/show_bug.cgi?id=12146
    5.  https://bugzilla.yoctoproject.org/show_bug.cgi?id=12194
    6.  https://bugzilla.yoctoproject.org/show_bug.cgi?id=12131
    7.  https://bugzilla.yoctoproject.org/show_bug.cgi?id=12170
    8.  https://bugzilla.yoctoproject.org/show_bug.cgi?id=12138
    9.  https://bugzilla.yoctoproject.org/show_bug.cgi?id=12168
    10. https://bugzilla.yoctoproject.org/show_bug.cgi?id=12150
    11. https://bugzilla.yoctoproject.org/show_bug.cgi?id=12133
    12. https://bugzilla.yoctoproject.org/show_bug.cgi?id=12140
    13. https://bugzilla.yoctoproject.org/show_bug.cgi?id=12142
    14. https://bugzilla.yoctoproject.org/show_bug.cgi?id=12164
    15. https://bugzilla.yoctoproject.org/show_bug.cgi?id=12171
    16. https://bugzilla.yoctoproject.org/show_bug.cgi?id=12175
    17. https://bugzilla.yoctoproject.org/show_bug.cgi?id=12200
    18. https://bugzilla.yoctoproject.org/show_bug.cgi?id=12201
    19. https://bugzilla.yoctoproject.org/show_bug.cgi?id=11994
    20. https://wiki.yoctoproject.org/wiki/WW40_-_2017-10-06_-_Full_Test_Cycle_2.4_RC1#Current_community_Open_Bugs_-_Medium.2B.2FHigh

 

Regards

Libertad G.

 

 


Re: Framework to implement mirroring in Yocto

Andre McCurdy <armccurdy@...>
 

On Fri, Oct 6, 2017 at 3:08 PM, Gutierrez, Hernan Ildefonso (Boise
R&D, FW) <hernan_gutierrez@hp.com> wrote:
Hi,

We are planning to implement a mirror for both downloaded source code and
the sstate cache in our work environment.

We are planning to use Nexus and Nuget for storage and version control.
We don't know if these are the right tools.
There are two main aspects to a mirror: fetching from the mirror
(which happens during the build process) and populating the mirror
(which generally happens outside the main build process).

For fetching from the mirror during the build process, the bitbake
fetchers are used, so your server needs to be accessible via a
protocol which the fetchers support; e.g. http or https would be
recommended. Support for sstate mirrors via https with a password (i.e.
if you want private mirrors which can be accessed over the internet
without needing a VPN) was first added in pyro, so you'll need to
backport a couple of patches to make that work with morty. If the
standard bitbake fetchers don't meet your needs then it's not a huge
task to write a custom fetcher - see the Amazon S3 fetcher recently
added to bitbake. It's less than 100 lines of code.
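
As a concrete sketch of the fetch side (the mirror host below is a
placeholder, and the override syntax is the pre-3.x style in use on
morty/pyro):

    # local.conf sketch -- mirror.example.com is hypothetical
    SSTATE_MIRRORS ?= "file://.* https://mirror.example.com/sstate/PATH"
    PREMIRRORS_prepend = "\
         git://.*/.*   https://mirror.example.com/downloads/ \n \
         ftp://.*/.*   https://mirror.example.com/downloads/ \n \
         http://.*/.*  https://mirror.example.com/downloads/ \n \
         https://.*/.* https://mirror.example.com/downloads/ \n"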

For populating the mirror, you will likely need to implement something
custom, i.e. outside bitbake and the build process, since bitbake only
really supports writing sstate and downloads to local directories. It
could be something as simple as running rsync after every build to
upload from the build server's local sstate and downloads directories
to the mirror server(s). It could be something more complex - it's up
to you.
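
The "something simple" could be as small as the following sketch, run
from the build directory after each build (host and paths are
hypothetical; excluding the *.done stamp files keeps the mirror clean):

    rsync -a --ignore-existing --exclude '*.done' downloads/  mirror.example.com:/srv/mirror/downloads/
    rsync -a --ignore-existing sstate-cache/                  mirror.example.com:/srv/mirror/sstate/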

Note that for both sstate and downloads the files on the mirror would
be expected to be unique (e.g. there would never be two different
versions of "gcc-6.3.0.tar.bz2", etc), so there's no obvious reason to
be able to snapshot or version the state of the mirrors. Just let the
files accumulate over time - the old versions won't be lost or
over-written.

Since we are about to embark on this project, I wanted to know before
starting whether you have some pointers on how a mirroring framework
for Yocto can be implemented.

I am not sure if there are common tools (other tools) with which Yocto
integrates nicely.

Any pointers will be appreciated,


Framework to implement mirroring in Yocto

Gutierrez, Hernan Ildefonso (Boise R&D, FW) <hernan_gutierrez@...>
 

Hi,

 

We are planning to implement a mirror for both downloaded source code and the sstate cache in our work environment.

 

We are planning to use Nexus and Nuget for storage and version control. We don't know if these are the right tools.

 

Since we are about to embark on this project, I wanted to know before starting whether you have some pointers on how a mirroring framework for Yocto can be implemented.

 

I am not sure if there are common tools (other tools) with which Yocto integrates nicely.

 

Any pointers will be appreciated,

 

--Hernan

 

PS> We are in morty branch currently.


Re: [opkg-devel] [opkg-utils PATCH] opkg.py/opkg-make-index: Add option to include all fields

Alejandro del Castillo <alejandro.delcastillo@...>
 

yep, that makes sense, thanks for the contribution

merged

On 10/05/2017 12:03 PM, Jeffrey Pautler wrote:
If the -f option is enabled, opkg-make-index will include user-defined
fields in the package index rather than discarding them. This change is
motivated by the fact that opkg now has support for user-defined fields
in the package index.
Signed-off-by: Jeffrey Pautler <jeffrey.pautler@ni.com>
---
opkg-make-index | 11 +++++++----
opkg.py         | 28 ++++++++++++++++++----------
2 files changed, 25 insertions(+), 14 deletions(-)
diff --git a/opkg-make-index b/opkg-make-index
index 3f757f6..3227fc0 100755
--- a/opkg-make-index
+++ b/opkg-make-index
@@ -11,7 +11,7 @@ import re
verbose = 0
 def usage():
-     sys.stderr.write("%s [-h] [-s] [-m] [-a] [-l Packages.filelist] [-p Packages] [-r Packages.old] [-L localesdir] [-v] packagesdir\n" % (sys.argv[0],))
+     sys.stderr.write("%s [-h] [-s] [-m] [-a] [-f] [-l Packages.filelist] [-p Packages] [-r Packages.old] [-L localesdir] [-v] packagesdir\n" % (sys.argv[0],))
      sys.exit(-1)
 def to_morgue(filename):
@@ -43,7 +43,8 @@ stamplist_filename = "Packages.stamps"
opt_s = 0
opt_m = 0
opt_a = 0
-(opts, remaining_args) = getopt.getopt(sys.argv[1:], "hl:p:vsmr:L:a")
+opt_f = 0
+(opts, remaining_args) = getopt.getopt(sys.argv[1:], "hl:p:vsmr:L:af")
for (optkey, optval) in opts:
      if optkey == '-h':
           usage()
@@ -64,6 +65,8 @@ for (optkey, optval) in opts:
           locales_dir = optval
      if optkey == '-a':
           opt_a = 1
+     if optkey == '-f':
+          opt_f = 1
 if ( not remaining_args ):
      usage()
@@ -81,7 +84,7 @@ if old_filename:
      if (verbose):
           sys.stderr.write("Reading package list from " + old_filename + "\n")
      old_packages = opkg.Packages()
-     old_packages.read_packages_file(old_filename)
+     old_packages.read_packages_file(old_filename, opt_f)
      for k in list(old_packages.packages.keys()):
           p = old_packages.packages[k]
           old_pkg_hash[p.filename] = p
@@ -122,7 +125,7 @@ for abspath in files:
      if not pkg:
           if (verbose):
                sys.stderr.write("Reading info for package %s\n" % (filename,))
-          pkg = opkg.Package(abspath, relpath=pkg_dir)
+          pkg = opkg.Package(abspath, relpath=pkg_dir, all_fields=opt_f)
      if opt_a:
           pkg_key = ("%s:%s:%s" % (pkg.package, pkg.architecture, pkg.version))
diff --git a/opkg.py b/opkg.py
index 9131755..cdadcab 100644
--- a/opkg.py
+++ b/opkg.py
@@ -45,6 +45,7 @@ from stat import ST_SIZE
import arfile
import tarfile
import textwrap
+import collections
 class Version(object):
     """A class for holding parsed package version information."""
@@ -123,7 +124,7 @@ class Package(object):
     # relpath: If this argument is set, the file path is given relative to this
     #   path when a string representation of the Package object is created. If
     #   this argument is not set, the basename of the file path is given.
-    def __init__(self, fn=None, relpath=None):
+    def __init__(self, fn=None, relpath=None, all_fields=None):
         self.package = None
         self.version = 'none'
         self.parsed_version = None
@@ -153,6 +154,7 @@ class Package(object):
         self.fn = fn
         self.license = None
+        self.user_defined_fields = collections.OrderedDict()
         if fn:
             # see if it is deb format
             f = open(fn, "rb")
@@ -176,7 +178,7 @@ class Package(object):
             except KeyError:
                 control = tarf.extractfile("./control")
             try:
-                self.read_control(control)
+                self.read_control(control, all_fields)
             except TypeError as e:
                 sys.stderr.write("Cannot read control file '%s' - %s\n" % (fn, e))
             control.close()
@@ -215,7 +217,7 @@ class Package(object):
             self.size = stat[ST_SIZE]
         return int(self.size)
-    def read_control(self, control):
+    def read_control(self, control, all_fields=None):
         import os
         line = control.readline()
@@ -227,19 +229,22 @@ class Package(object):
             line = line.rstrip()
             lineparts = re.match(r'([\w-]*?):\s*(.*)', line)
             if lineparts:
-                name = lineparts.group(1).lower()
+                name = lineparts.group(1)
+                name_lowercase = name.lower()
                 value = lineparts.group(2)
                 while 1:
                     line = control.readline()
                     if not line: break
                     if line[0] != ' ': break
                     value = value + '\n' + line
-                if name == 'size':
+                if name_lowercase == 'size':
                     self.size = int(value)
-                elif name == 'md5sum':
+                elif name_lowercase == 'md5sum':
                     self.md5 = value
-                elif name in self.__dict__:
-                    self.__dict__[name] = value
+                elif name_lowercase in self.__dict__:
+                    self.__dict__[name_lowercase] = value
+                elif all_fields:
+                    self.user_defined_fields[name] = value
                 else:
                     print("Lost field %s, %s" % (name,value))
                     pass
@@ -490,6 +495,9 @@ class Package(object):
         if self.license: out = out + "License: %s\n" % (self.license)
         if self.priority: out = out + "Priority: %s\n" % (self.priority)
         if self.tags: out = out + "Tags: %s\n" % (self.tags)
+        if self.user_defined_fields:
+            for k, v in self.user_defined_fields.items():
+                out = out + "%s: %s\n" % (k, v)
         out = out + "\n"
         return out
@@ -523,12 +531,12 @@ class Packages(object):
         else:
             return 1
-    def read_packages_file(self, fn):
+    def read_packages_file(self, fn, all_fields=None):
         f = open(fn, "r")
         while True:
             pkg = Package()
             try:
-                pkg.read_control(f)
+                pkg.read_control(f, all_fields)
             except TypeError as e:
                 sys.stderr.write("Cannot read control file '%s' - %s\n" % (fn, e))
                 continue
--
2.7.4
--
Cheers,

Alejandro
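
For anyone picking this up, an invocation along the lines of the updated
usage string would then keep custom control fields in the generated index
(the directory name is illustrative):

    # index the ipk files in ./ipk-dir, preserving user-defined fields (-f)
    opkg-make-index -f -p Packages ./ipk-dir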
