/* MMIX-specific support for 64-bit ELF.
   Copyright 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Contributed by Hans-Peter Nilsson <hp@bitrange.com>
This file is part of BFD, the Binary File Descriptor library.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
- "Traditional" linker relaxation (shrinking whole sections).
- Merge reloc stubs jumping to same location.
- GETA stub relaxation (call a stub for out of range new
R_MMIX_GETA_STUBBABLE). */
#include "bfd.h"
#include "sysdep.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/mmix.h"
#include "opcode/mmix.h"
#define MINUS_ONE (((bfd_vma) 0) - 1)
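/* Worst-case size of a PUSHJ stub: four address-loading insns plus a
   final branch, i.e. five 4-byte insns.  */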
#define MAX_PUSHJ_STUB_SIZE (5 * 4)
#define FATAL_DEBUG \
_bfd_abort (__FILE__, __LINE__, \
"Internal: Non-debugged code (test-case missing)")
#define BAD_CASE(x) \
_bfd_abort (__FILE__, __LINE__, \
"bad case for " #x)
struct _mmix_elf_section_data
{
struct bfd_elf_section_data elf;
union
{
struct bpo_reloc_section_info *reloc;
struct bpo_greg_section_info *greg;
} bpo;
struct pushj_stub_info
{
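/* Number of R_MMIX_PUSHJ_STUBBABLE relocs seen in this section.  */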
bfd_size_type n_pushj_relocs;
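/* Sum of the stub sizes for this section; recomputed each relaxation
   round (and accumulated for output sections in relocatable links).  */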
bfd_size_type stubs_size_sum;
/* Size of the stub for each PUSHJ reloc; an array with n_pushj_relocs
   of these.  Allocated in mmix_elf_check_common_relocs.  */
bfd_size_type *stub_size;
/* Offset into the stub area while relocating; simpler to track here than
   to recompute from the sizes above: error coverage is easier and we don't
   have to reset the stubs_size_sum for relocation.  */
bfd_size_type stub_offset;
} pjs;
};
#define mmix_elf_section_data(sec) \
((struct _mmix_elf_section_data *) elf_section_data (sec))
/* For a section with R_MMIX_BASE_PLUS_OFFSET (BPO) relocs, we attach
   this struct as mmix_elf_section_data (section)->bpo, which is otherwise
   NULL.  */
struct bpo_reloc_section_info
{
size_t first_base_plus_offset_reloc;
size_t n_bpo_relocs_this_section;
size_t bpo_index;
/* A helper to locate the greg contents section from
   mmix_final_link_relocate.  What we really want to get at is the
   global single struct greg_relocation, so we stash it here.  */
asection *bpo_greg_section;
};
/* Helper struct (in global context) for the one below.
   There's one of these created for every BPO reloc.  */
struct bpo_reloc_request
{
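/* The target value of the reloc: symbol value plus addend, recorded
   during relaxation.  */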
bfd_vma value;
/* Valid after relaxation: the allocated register index and the offset that
   must be added.  The offset is in range 0..255.  */
size_t regindex;
size_t offset;
/* The sequence number of this reloc, in the order in
   which BPO relocs were found.  Used to create an index after reloc
   requests are sorted.  */
size_t bpo_reloc_no;
/* TRUE if a value has been filled in and sorted
   into the other members.  Is FALSE only for BPO relocs in a GC:ed
   section.  */
bfd_boolean valid;
};
/* We attach this struct to the linker-allocated
   greg contents section (MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME),
   which is linked into the register contents section
   (MMIX_REG_CONTENTS_SECTION_NAME).  This section is created by the
   linker; using the same hook as for ordinary BPO relocs does not
   collide.  */
struct bpo_greg_section_info
{
/* The number of BPO-relocs to allocate registers for, excluding those in
   GC:ed sections; in other words, the live BPO-relocs.  */
size_t n_bpo_relocs;
/* The total number of BPO-relocs seen; the size of reloc_request and the
   sorted_indexes.  Valid after the check.*relocs functions are called
   for all incoming sections.  It includes the number of BPO relocs in
   sections that were GC:ed.  */
size_t n_max_bpo_relocs;
/* Helper count used while relaxing: the number of BPO-relocs not yet seen
   this round.  When it reaches zero we can do the allocation, since we
   don't have a single "after-relaxation" hook.  */
size_t n_remaining_bpo_relocs_this_relaxation_round;
/* The number of linker-allocated gregs needed for the BPO-relocs.
   This is an approximation after _bfd_mmix_before_linker_allocation
   and supposedly accurate after mmix_elf_relax_section is called for
   all incoming non-collected sections.  */
size_t n_allocated_bpo_gregs;
/* For each BPO reloc (in the order seen), the index into reloc_request
   after sorting; the requests are sorted on value, then
   by increasing index for strict sorting order.  */
size_t *bpo_reloc_indexes;
/* The greg allocation requests, one per BPO reloc, filled in and sorted by
   the relaxation function.  */
struct bpo_reloc_request *reloc_request;
};
static bfd_boolean mmix_elf_link_output_symbol_hook
PARAMS ((struct bfd_link_info *, const char *, Elf_Internal_Sym *,
asection *, struct elf_link_hash_entry *));
static bfd_reloc_status_type mmix_elf_reloc
PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
static reloc_howto_type *bfd_elf64_bfd_reloc_type_lookup
PARAMS ((bfd *, bfd_reloc_code_real_type));
static void mmix_info_to_howto_rela
PARAMS ((bfd *, arelent *, Elf_Internal_Rela *));
static int mmix_elf_sort_relocs PARAMS ((const PTR, const PTR));
static bfd_boolean mmix_elf_new_section_hook
PARAMS ((bfd *, asection *));
static bfd_boolean mmix_elf_check_relocs
PARAMS ((bfd *, struct bfd_link_info *, asection *,
const Elf_Internal_Rela *));
static bfd_boolean mmix_elf_check_common_relocs
PARAMS ((bfd *, struct bfd_link_info *, asection *,
const Elf_Internal_Rela *));
static bfd_boolean mmix_elf_relocate_section
PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
Elf_Internal_Rela *, Elf_Internal_Sym *, asection **));
static asection * mmix_elf_gc_mark_hook
PARAMS ((asection *, struct bfd_link_info *, Elf_Internal_Rela *,
struct elf_link_hash_entry *, Elf_Internal_Sym *));
static bfd_boolean mmix_elf_gc_sweep_hook
PARAMS ((bfd *, struct bfd_link_info *, asection *,
const Elf_Internal_Rela *));
static bfd_reloc_status_type mmix_final_link_relocate
PARAMS ((reloc_howto_type *, asection *, bfd_byte *,
bfd_vma, bfd_signed_vma, bfd_vma, const char *, asection *));
static bfd_reloc_status_type mmix_elf_perform_relocation
PARAMS ((asection *, reloc_howto_type *, PTR, bfd_vma, bfd_vma));
static bfd_boolean mmix_elf_section_from_bfd_section
PARAMS ((bfd *, asection *, int *));
static bfd_boolean mmix_elf_add_symbol_hook
PARAMS ((bfd *, struct bfd_link_info *, Elf_Internal_Sym *,
const char **, flagword *, asection **, bfd_vma *));
static bfd_boolean mmix_elf_is_local_label_name
PARAMS ((bfd *, const char *));
static int bpo_reloc_request_sort_fn PARAMS ((const PTR, const PTR));
static bfd_boolean mmix_elf_relax_section
PARAMS ((bfd *abfd, asection *sec, struct bfd_link_info *link_info,
bfd_boolean *again));
extern bfd_boolean mmix_elf_final_link PARAMS ((bfd *, struct bfd_link_info *));
extern void mmix_elf_symbol_processing PARAMS ((bfd *, asymbol *));
extern void mmix_dump_bpo_gregs
PARAMS ((struct bfd_link_info *, bfd_error_handler_type));
static void
mmix_set_relaxable_size
PARAMS ((bfd *, asection *, void *));
/* Beware: this table must be kept with each entry at the index matching
   their R_MMIX_ number.  */
static reloc_howto_type elf_mmix_howto_table[] =
{
HOWTO (R_MMIX_NONE,
0,
2,
32,
FALSE,
0,
complain_overflow_bitfield,
bfd_elf_generic_reloc,
"R_MMIX_NONE",
FALSE,
0,
0,
FALSE),
HOWTO (R_MMIX_8,
0,
0,
8,
FALSE,
0,
complain_overflow_bitfield,
bfd_elf_generic_reloc,
"R_MMIX_8",
FALSE,
0,
0xff,
FALSE),
HOWTO (R_MMIX_16,
0,
1,
16,
FALSE,
0,
complain_overflow_bitfield,
bfd_elf_generic_reloc,
"R_MMIX_16",
FALSE,
0,
0xffff,
FALSE),
HOWTO (R_MMIX_24,
0,
2,
24,
FALSE,
0,
complain_overflow_bitfield,
bfd_elf_generic_reloc,
"R_MMIX_24",
FALSE,
~0xffffff,
0xffffff,
FALSE),
HOWTO (R_MMIX_32,
0,
2,
32,
FALSE,
0,
complain_overflow_bitfield,
bfd_elf_generic_reloc,
"R_MMIX_32",
FALSE,
0,
0xffffffff,
FALSE),
HOWTO (R_MMIX_64,
0,
4,
64,
FALSE,
0,
complain_overflow_bitfield,
bfd_elf_generic_reloc,
"R_MMIX_64",
FALSE,
0,
MINUS_ONE,
FALSE),
HOWTO (R_MMIX_PC_8,
0,
0,
8,
TRUE,
0,
complain_overflow_bitfield,
bfd_elf_generic_reloc,
"R_MMIX_PC_8",
FALSE,
0,
0xff,
TRUE),
HOWTO (R_MMIX_PC_16,
0,
1,
16,
TRUE,
0,
complain_overflow_bitfield,
bfd_elf_generic_reloc,
"R_MMIX_PC_16",
FALSE,
0,
0xffff,
TRUE),
HOWTO (R_MMIX_PC_24,
0,
2,
24,
TRUE,
0,
complain_overflow_bitfield,
bfd_elf_generic_reloc,
"R_MMIX_PC_24",
FALSE,
~0xffffff,
0xffffff,
TRUE),
HOWTO (R_MMIX_PC_32,
0,
2,
32,
TRUE,
0,
complain_overflow_bitfield,
bfd_elf_generic_reloc,
"R_MMIX_PC_32",
FALSE,
0,
0xffffffff,
TRUE),
HOWTO (R_MMIX_PC_64,
0,
4,
64,
TRUE,
0,
complain_overflow_bitfield,
bfd_elf_generic_reloc,
"R_MMIX_PC_64",
FALSE,
0,
MINUS_ONE,
TRUE),
HOWTO (R_MMIX_GNU_VTINHERIT,
0,
0,
0,
FALSE,
0,
complain_overflow_dont,
NULL,
"R_MMIX_GNU_VTINHERIT",
FALSE,
0,
0,
TRUE),
HOWTO (R_MMIX_GNU_VTENTRY,
0,
0,
0,
FALSE,
0,
complain_overflow_dont,
_bfd_elf_rel_vtable_reloc_fn,
"R_MMIX_GNU_VTENTRY",
FALSE,
0,
0,
FALSE),
/* The GETA relocation is supposed to get any address that could
   possibly be reached by the GETA instruction.  It can silently expand
   to get a 64-bit operand, but will complain if any of the two least
   significant bits are set.  The howto members reflect a simple GETA.  */
HOWTO (R_MMIX_GETA,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_GETA",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
HOWTO (R_MMIX_GETA_1,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_GETA_1",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
HOWTO (R_MMIX_GETA_2,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_GETA_2",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
HOWTO (R_MMIX_GETA_3,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_GETA_3",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
/* The conditional branches are supposed to reach any (code) address.
   It can silently expand to a 64-bit operand, but will emit an error if
   any of the two least significant bits are set.  The howto members
   reflect a simple branch.  */
HOWTO (R_MMIX_CBRANCH,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_CBRANCH",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
HOWTO (R_MMIX_CBRANCH_J,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_CBRANCH_J",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
HOWTO (R_MMIX_CBRANCH_1,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_CBRANCH_1",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
HOWTO (R_MMIX_CBRANCH_2,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_CBRANCH_2",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
HOWTO (R_MMIX_CBRANCH_3,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_CBRANCH_3",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
/* A PUSHJ is supposed to reach any (code) address, as long as it is
   the beginning of a function (no usable restriction).  It can silently
   expand to a 64-bit operand, but will emit an error if any of the two
   least significant bits are set.  It can also expand into a call to a
   stub; see R_MMIX_PUSHJ_STUBBABLE.  The howto members reflect a simple
   PUSHJ.  */
HOWTO (R_MMIX_PUSHJ,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_PUSHJ",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
HOWTO (R_MMIX_PUSHJ_1,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_PUSHJ_1",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
HOWTO (R_MMIX_PUSHJ_2,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_PUSHJ_2",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
HOWTO (R_MMIX_PUSHJ_3,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_PUSHJ_3",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
/* A JMP is supposed to reach any (code) address.  By itself it can only
   reach +-64M; the expansion can reach all 64 bits.  Note that the 64M
   limit is soon reached if you link the program in wildly different
   memory segments.  The howto members reflect a trivial JMP.  */
HOWTO (R_MMIX_JMP,
2,
2,
27,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_JMP",
FALSE,
~0x1ffffff,
0x1ffffff,
TRUE),
HOWTO (R_MMIX_JMP_1,
2,
2,
27,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_JMP_1",
FALSE,
~0x1ffffff,
0x1ffffff,
TRUE),
HOWTO (R_MMIX_JMP_2,
2,
2,
27,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_JMP_2",
FALSE,
~0x1ffffff,
0x1ffffff,
TRUE),
HOWTO (R_MMIX_JMP_3,
2,
2,
27,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_JMP_3",
FALSE,
~0x1ffffff,
0x1ffffff,
TRUE),
/* When no expansion is possible or wanted, or
   when relaxation has done all it can do, these relocs are used.  For
   GETA/PUSHJ/branches.  */
HOWTO (R_MMIX_ADDR19,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_ADDR19",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE),
HOWTO (R_MMIX_ADDR27,
2,
2,
27,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_ADDR27",
FALSE,
~0x1ffffff,
0x1ffffff,
TRUE),
/* A general register or the value 0..255.  If a value, then the
   instruction (offset -3) needs adjusting.  */
HOWTO (R_MMIX_REG_OR_BYTE,
0,
1,
8,
FALSE,
0,
complain_overflow_bitfield,
mmix_elf_reloc,
"R_MMIX_REG_OR_BYTE",
FALSE,
0,
0xff,
FALSE),
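/* A general register.  */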
HOWTO (R_MMIX_REG,
0,
1,
8,
FALSE,
0,
complain_overflow_bitfield,
mmix_elf_reloc,
"R_MMIX_REG",
FALSE,
0,
0xff,
FALSE),
/* A 64-bit address expression turned into a base register plus offset.
   The sizes must correspond to the valid range of the expression, while
   the bitmasks correspond to what we store in the image.  */
HOWTO (R_MMIX_BASE_PLUS_OFFSET,
0,
4,
64,
FALSE,
0,
complain_overflow_bitfield,
mmix_elf_reloc,
"R_MMIX_BASE_PLUS_OFFSET",
FALSE,
0,
0xffff,
FALSE),
/* A "magic" relocation for the LOCAL expression, asserting that the final
   expression is less than the number of global registers.  No actual
   modification of the contents is done.  Implementing this as a
   relocation was less intrusive than e.g. putting such expressions in a
   section to discard *after* relocation.  */
HOWTO (R_MMIX_LOCAL,
0,
0,
0,
FALSE,
0,
complain_overflow_dont,
mmix_elf_reloc,
"R_MMIX_LOCAL",
FALSE,
0,
0,
FALSE),
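/* A PUSHJ that the linker may redirect through a stub (a JMP or a full
   address-load-and-go sequence) when the target is out of range.  */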
HOWTO (R_MMIX_PUSHJ_STUBBABLE,
2,
2,
19,
TRUE,
0,
complain_overflow_signed,
mmix_elf_reloc,
"R_MMIX_PUSHJ_STUBBABLE",
FALSE,
~0x0100ffff,
0x0100ffff,
TRUE)
};
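/* Map BFD reloc types to MMIX ELF reloc types.  */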
struct mmix_reloc_map
{
bfd_reloc_code_real_type bfd_reloc_val;
enum elf_mmix_reloc_type elf_reloc_val;
};
static const struct mmix_reloc_map mmix_reloc_map[] =
{
{BFD_RELOC_NONE, R_MMIX_NONE},
{BFD_RELOC_8, R_MMIX_8},
{BFD_RELOC_16, R_MMIX_16},
{BFD_RELOC_24, R_MMIX_24},
{BFD_RELOC_32, R_MMIX_32},
{BFD_RELOC_64, R_MMIX_64},
{BFD_RELOC_8_PCREL, R_MMIX_PC_8},
{BFD_RELOC_16_PCREL, R_MMIX_PC_16},
{BFD_RELOC_24_PCREL, R_MMIX_PC_24},
{BFD_RELOC_32_PCREL, R_MMIX_PC_32},
{BFD_RELOC_64_PCREL, R_MMIX_PC_64},
{BFD_RELOC_VTABLE_INHERIT, R_MMIX_GNU_VTINHERIT},
{BFD_RELOC_VTABLE_ENTRY, R_MMIX_GNU_VTENTRY},
{BFD_RELOC_MMIX_GETA, R_MMIX_GETA},
{BFD_RELOC_MMIX_CBRANCH, R_MMIX_CBRANCH},
{BFD_RELOC_MMIX_PUSHJ, R_MMIX_PUSHJ},
{BFD_RELOC_MMIX_JMP, R_MMIX_JMP},
{BFD_RELOC_MMIX_ADDR19, R_MMIX_ADDR19},
{BFD_RELOC_MMIX_ADDR27, R_MMIX_ADDR27},
{BFD_RELOC_MMIX_REG_OR_BYTE, R_MMIX_REG_OR_BYTE},
{BFD_RELOC_MMIX_REG, R_MMIX_REG},
{BFD_RELOC_MMIX_BASE_PLUS_OFFSET, R_MMIX_BASE_PLUS_OFFSET},
{BFD_RELOC_MMIX_LOCAL, R_MMIX_LOCAL},
{BFD_RELOC_MMIX_PUSHJ_STUBBABLE, R_MMIX_PUSHJ_STUBBABLE}
};
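/* Look up the howto for a BFD reloc code; return NULL if there's no MMIX
   counterpart.  */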
static reloc_howto_type *
bfd_elf64_bfd_reloc_type_lookup (abfd, code)
bfd *abfd ATTRIBUTE_UNUSED;
bfd_reloc_code_real_type code;
{
unsigned int i;
for (i = 0;
i < sizeof (mmix_reloc_map) / sizeof (mmix_reloc_map[0]);
i++)
{
if (mmix_reloc_map[i].bfd_reloc_val == code)
return &elf_mmix_howto_table[mmix_reloc_map[i].elf_reloc_val];
}
return NULL;
}
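/* Allocate our section-specific data (BPO and PUSHJ-stub bookkeeping) and
   chain to the generic ELF new-section hook.  */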
static bfd_boolean
mmix_elf_new_section_hook (abfd, sec)
bfd *abfd;
asection *sec;
{
struct _mmix_elf_section_data *sdata;
bfd_size_type amt = sizeof (*sdata);
sdata = (struct _mmix_elf_section_data *) bfd_zalloc (abfd, amt);
if (sdata == NULL)
return FALSE;
sec->used_by_bfd = (PTR) sdata;
return _bfd_elf_new_section_hook (abfd, sec);
}
/* Perform the bit-fiddling and sanity check for a
   final relocation.  Each relocation gets its *worst*-case expansion
in size when it arrives here; any reduction in size should have been
caught in linker relaxation earlier. When we get here, the relocation
looks like the smallest instruction with SWYM:s (nop:s) appended to the
max size. We fill in those nop:s.
R_MMIX_GETA: (FIXME: Relaxation should break this up in 1, 2, 3 tetra)
GETA $N,foo
->
SETL $N,foo & 0xffff
INCML $N,(foo >> 16) & 0xffff
INCMH $N,(foo >> 32) & 0xffff
INCH $N,(foo >> 48) & 0xffff
R_MMIX_CBRANCH: (FIXME: Relaxation should break this up, but
condbranches needing relaxation might be rare enough to not be
worthwhile.)
[P]Bcc $N,foo
->
[~P]B~cc $N,.+20
SETL $255,foo & ...
INCML ...
INCMH ...
INCH ...
GO $255,$255,0
R_MMIX_PUSHJ: (FIXME: Relaxation...)
PUSHJ $N,foo
->
SETL $255,foo & ...
INCML ...
INCMH ...
INCH ...
PUSHGO $N,$255,0
R_MMIX_JMP: (FIXME: Relaxation...)
JMP foo
->
SETL $255,foo & ...
INCML ...
INCMH ...
INCH ...
GO $255,$255,0
R_MMIX_ADDR19 and R_MMIX_ADDR27 are just filled in. */
static bfd_reloc_status_type
mmix_elf_perform_relocation (isec, howto, datap, addr, value)
asection *isec;
reloc_howto_type *howto;
PTR datap;
bfd_vma addr;
bfd_vma value;
{
bfd *abfd = isec->owner;
bfd_reloc_status_type flag = bfd_reloc_ok;
bfd_reloc_status_type r;
int offs = 0;
int reg = 255;
/* Most of these relocs expand into a common trailing sequence loading the
   full value into a register; the initial insns differ per reloc.
   We handle the differences here and the common sequence later.  */
switch (howto->type)
{
case R_MMIX_GETA:
offs = 0;
reg = bfd_get_8 (abfd, (bfd_byte *) datap + 1);
value += addr;
break;
case R_MMIX_CBRANCH:
{
int in1 = bfd_get_16 (abfd, (bfd_byte *) datap) << 16;
/* Invert the condition and prediction bit, and set the offset
   to five instructions ahead.
We *can* do better if we want to. If the branch is found to be
within limits, we could leave the branch as is; there'll just
be a bunch of NOP:s after it. But we shouldn't see this
sequence often enough that it's worth doing it. */
bfd_put_32 (abfd,
(((in1 ^ ((PRED_INV_BIT | COND_INV_BIT) << 24)) & ~0xffff)
| (24/4)),
(bfd_byte *) datap);
bfd_put_32 (abfd,
((GO_INSN_BYTE | IMM_OFFSET_BIT) << 24) | 0xffff00,
(bfd_byte *) datap + 20);
offs = 4;
value += addr;
}
break;
case R_MMIX_PUSHJ_STUBBABLE:
if ((value & 3) == 0
&& (r = bfd_check_overflow (complain_overflow_signed,
howto->bitsize,
0,
bfd_arch_bits_per_address (abfd),
value)) == bfd_reloc_ok)
goto pcrel_mmix_reloc_fits;
else
{
bfd_size_type size = isec->rawsize ? isec->rawsize : isec->size;
/* We have the bytes at the PUSHJ insn and need to get the
   position for the stub.  There's supposed to be room allocated
   for the stub.  */
bfd_byte *stubcontents
= ((bfd_byte *) datap
- (addr - (isec->output_section->vma + isec->output_offset))
+ size
+ mmix_elf_section_data (isec)->pjs.stub_offset);
bfd_vma stubaddr;
/* The PUSHJ doesn't reach the target, so redirect it to the
   location of the stub.  */
r = mmix_elf_perform_relocation (isec,
&elf_mmix_howto_table
[R_MMIX_ADDR19],
datap,
addr,
isec->output_section->vma
+ isec->output_offset
+ size
+ (mmix_elf_section_data (isec)
->pjs.stub_offset)
- addr);
if (r != bfd_reloc_ok)
return r;
stubaddr
= (isec->output_section->vma
+ isec->output_offset
+ size
+ mmix_elf_section_data (isec)->pjs.stub_offset);
/* At the stub, emit a single JMP if the target is in range of it, else
   the full 5-insn stub.  */
if (bfd_check_overflow (complain_overflow_signed,
elf_mmix_howto_table[R_MMIX_ADDR27].bitsize,
0,
bfd_arch_bits_per_address (abfd),
addr + value - stubaddr) == bfd_reloc_ok)
{
bfd_put_32 (abfd, JMP_INSN_BYTE << 24, stubcontents);
r = mmix_elf_perform_relocation (isec,
&elf_mmix_howto_table
[R_MMIX_ADDR27],
stubcontents,
stubaddr,
value + addr - stubaddr);
mmix_elf_section_data (isec)->pjs.stub_offset += 4;
if (size + mmix_elf_section_data (isec)->pjs.stub_offset
> isec->size)
abort ();
return r;
}
else
{
bfd_put_32 (abfd,
((GO_INSN_BYTE | IMM_OFFSET_BIT) << 24)
| 0xff00, (bfd_byte *) stubcontents + 16);
/* Fall through to the common address-loading sequence, but emit it at the
   linker stub, and account for the full stub size.  */
value += addr;
datap = stubcontents;
mmix_elf_section_data (isec)->pjs.stub_offset
+= MAX_PUSHJ_STUB_SIZE;
}
}
break;
case R_MMIX_PUSHJ:
{
int inreg = bfd_get_8 (abfd, (bfd_byte *) datap + 1);
bfd_put_32 (abfd,
((PUSHGO_INSN_BYTE | IMM_OFFSET_BIT) << 24)
| (inreg << 16)
| 0xff00,
(bfd_byte *) datap + 16);
value += addr;
}
break;
case R_MMIX_JMP:
/* This one is a bit special.  If we get here on a non-relaxing
   link, and the destination is actually in range, we don't need to
   execute the nops.
   If so, we fall through to the bit-fiddling relocs.

   FIXME: bfd_check_overflow seems broken; the relocation is
   rightshifted before testing, so supply a zero rightshift.  */
if (! ((value & 3) == 0
&& (r = bfd_check_overflow (complain_overflow_signed,
howto->bitsize,
0,
bfd_arch_bits_per_address (abfd),
value)) == bfd_reloc_ok))
{
/* The destination doesn't fit, so expand: the address is
   modified below; put a "GO $255,$255,0" after the
   address-loading sequence.  */
bfd_put_32 (abfd,
((GO_INSN_BYTE | IMM_OFFSET_BIT) << 24)
| 0xffff00,
(bfd_byte *) datap + 16);
value += addr;
break;
}
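/* FALLTHROUGH.  */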
case R_MMIX_ADDR19:
case R_MMIX_ADDR27:
pcrel_mmix_reloc_fits:
if ((value & 3) == 0
&& (r = bfd_check_overflow (complain_overflow_signed,
howto->bitsize,
0,
bfd_arch_bits_per_address (abfd),
value)) == bfd_reloc_ok)
{
bfd_vma in1
= bfd_get_32 (abfd, (bfd_byte *) datap);
bfd_vma highbit;
if ((bfd_signed_vma) value < 0)
{
highbit = 1 << 24;
value += (1 << (howto->bitsize - 1));
}
else
highbit = 0;
value >>= 2;
bfd_put_32 (abfd,
(in1 & howto->src_mask)
| highbit
| (value & howto->dst_mask),
(bfd_byte *) datap);
return bfd_reloc_ok;
}
else
return bfd_reloc_overflow;
case R_MMIX_BASE_PLUS_OFFSET:
{
struct bpo_reloc_section_info *bpodata
= mmix_elf_section_data (isec)->bpo.reloc;
asection *bpo_greg_section
= bpodata->bpo_greg_section;
struct bpo_greg_section_info *gregdata
= mmix_elf_section_data (bpo_greg_section)->bpo.greg;
size_t bpo_index
= gregdata->bpo_reloc_indexes[bpodata->bpo_index++];
/* A consistency check: the value we computed for this reloc must
   be the same as the value we stored for that relocation.  It
   doesn't cost much, so can be left in at all times.  */
if (value != gregdata->reloc_request[bpo_index].value)
{
(*_bfd_error_handler)
(_("%s: Internal inconsistency error for value for\n\
linker-allocated global register: linked: 0x%lx%08lx != relaxed: 0x%lx%08lx\n"),
bfd_get_filename (isec->owner),
(unsigned long) (value >> 32), (unsigned long) value,
(unsigned long) (gregdata->reloc_request[bpo_index].value
>> 32),
(unsigned long) gregdata->reloc_request[bpo_index].value);
bfd_set_error (bfd_error_bad_value);
return bfd_reloc_overflow;
}
/* Then store the linker-allocated register number and the offset
   into datap and datap + 1 respectively.  */
bfd_put_8 (abfd,
gregdata->reloc_request[bpo_index].regindex
+ bpo_greg_section->output_section->vma / 8,
datap);
bfd_put_8 (abfd,
gregdata->reloc_request[bpo_index].offset,
((unsigned char *) datap) + 1);
return bfd_reloc_ok;
}
case R_MMIX_REG_OR_BYTE:
case R_MMIX_REG:
if (value > 255)
return bfd_reloc_overflow;
bfd_put_8 (abfd, value, datap);
return bfd_reloc_ok;
default:
BAD_CASE (howto->type);
}
/* This is the common part of the worst-case expansion: the address-loading
   sequence.  */
/* We expand to the worst case, so be strict and complain about
   everything that looks strange.  */
if (value & 3)
flag = bfd_reloc_overflow;
bfd_put_32 (abfd,
(SETL_INSN_BYTE << 24) | (value & 0xffff) | (reg << 16),
(bfd_byte *) datap + offs);
bfd_put_32 (abfd,
(INCML_INSN_BYTE << 24) | ((value >> 16) & 0xffff) | (reg << 16),
(bfd_byte *) datap + offs + 4);
bfd_put_32 (abfd,
(INCMH_INSN_BYTE << 24) | ((value >> 32) & 0xffff) | (reg << 16),
(bfd_byte *) datap + offs + 8);
bfd_put_32 (abfd,
(INCH_INSN_BYTE << 24) | ((value >> 48) & 0xffff) | (reg << 16),
(bfd_byte *) datap + offs + 12);
return flag;
}
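/* Fill in the howto pointer of CACHE_PTR from the ELF reloc type in
   DST.  */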
static void
mmix_info_to_howto_rela (abfd, cache_ptr, dst)
bfd *abfd ATTRIBUTE_UNUSED;
arelent *cache_ptr;
Elf_Internal_Rela *dst;
{
unsigned int r_type;
r_type = ELF64_R_TYPE (dst->r_info);
BFD_ASSERT (r_type < (unsigned int) R_MMIX_max);
cache_ptr->howto = &elf_mmix_howto_table[r_type];
}
/* This function handles relocs when linking the ELF input
   to other formats (such as mmo); this is the relocation function from
   the reloc_table.  We don't get here for final pure ELF linking.  */
static bfd_reloc_status_type
mmix_elf_reloc (abfd, reloc_entry, symbol, data, input_section,
output_bfd, error_message)
bfd *abfd;
arelent *reloc_entry;
asymbol *symbol;
PTR data;
asection *input_section;
bfd *output_bfd;
char **error_message ATTRIBUTE_UNUSED;
{
bfd_vma relocation;
bfd_reloc_status_type r;
asection *reloc_target_output_section;
bfd_reloc_status_type flag = bfd_reloc_ok;
bfd_vma output_base = 0;
bfd_vma addr;
r = bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
input_section, output_bfd, error_message);
/* If that was all that was needed (i.e. this isn't a final link, only
   some segment adjustments), we're done.  */
if (r != bfd_reloc_continue)
return r;
if (bfd_is_und_section (symbol->section)
&& (symbol->flags & BSF_WEAK) == 0
&& output_bfd == (bfd *) NULL)
return bfd_reloc_undefined;
if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
return bfd_reloc_outofrange;
/* Work out which section the relocation is targeted at and the
   initial relocation command value.  */
if (bfd_is_com_section (symbol->section))
relocation = 0;
else
relocation = symbol->value;
reloc_target_output_section = bfd_get_output_section (symbol);
/* Work out the final address of the symbol we
   are relocating against, plus any addend.  */
if (output_bfd)
output_base = 0;
else
output_base = reloc_target_output_section->vma;
relocation += output_base + symbol->section->output_offset;
addr = (reloc_entry->address + input_section->output_section->vma
+ input_section->output_offset);
if (output_bfd != (bfd *) NULL)
{
relocation += reloc_entry->addend;
/* This is a partial relocation, and we want to apply the
   relocation to the reloc entry rather than the raw data.
   Modify the reloc inplace to reflect what we now know.  */
reloc_entry->addend = relocation;
reloc_entry->address += input_section->output_offset;
return flag;
}
return mmix_final_link_relocate (reloc_entry->howto, input_section,
data, reloc_entry->address,
reloc_entry->addend, relocation,
bfd_asymbol_name (symbol),
reloc_target_output_section);
}
/* Relocate an MMIX ELF section.  Modified from elf32-fr30.c; look to it
   for guidance if you're thinking of copying this.  */
static bfd_boolean
mmix_elf_relocate_section (output_bfd, info, input_bfd, input_section,
contents, relocs, local_syms, local_sections)
bfd *output_bfd ATTRIBUTE_UNUSED;
struct bfd_link_info *info;
bfd *input_bfd;
asection *input_section;
bfd_byte *contents;
Elf_Internal_Rela *relocs;
Elf_Internal_Sym *local_syms;
asection **local_sections;
{
Elf_Internal_Shdr *symtab_hdr;
struct elf_link_hash_entry **sym_hashes;
Elf_Internal_Rela *rel;
Elf_Internal_Rela *relend;
bfd_size_type size;
size_t pjsno = 0;
size = input_section->rawsize ? input_section->rawsize : input_section->size;
symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (input_bfd);
relend = relocs + input_section->reloc_count;
if (input_section->rawsize != 0
&& input_section->size > input_section->rawsize)
memset (contents + input_section->rawsize, 0,
input_section->size - input_section->rawsize);
for (rel = relocs; rel < relend; rel ++)
{
reloc_howto_type *howto;
unsigned long r_symndx;
Elf_Internal_Sym *sym;
asection *sec;
struct elf_link_hash_entry *h;
bfd_vma relocation;
bfd_reloc_status_type r;
const char *name = NULL;
int r_type;
bfd_boolean undefined_signalled = FALSE;
r_type = ELF64_R_TYPE (rel->r_info);
if (r_type == R_MMIX_GNU_VTINHERIT
|| r_type == R_MMIX_GNU_VTENTRY)
continue;
r_symndx = ELF64_R_SYM (rel->r_info);
if (info->relocatable)
{
/* This is a relocatable link.  We don't have to
   change anything, unless the reloc is against a section
   symbol, in which case we have to adjust according to where
   the section symbol winds up in the output section.  */
if (r_symndx < symtab_hdr->sh_info)
{
sym = local_syms + r_symndx;
if (ELF_ST_TYPE (sym->st_info) == STT_SECTION)
{
sec = local_sections [r_symndx];
rel->r_addend += sec->output_offset + sym->st_value;
}
}
/* For PUSHJ stub relocs however, we may need to change the
   reloc and the section contents, if the reloc doesn't reach
   beyond the end of the output section and previous stubs.
   Then we change the section contents to be a PUSHJ to the end
   of the input section plus stubs (we can do that without using
   a reloc), and then we change the reloc to be a R_MMIX_PUSHJ
   at the stub location.  */
if (r_type == R_MMIX_PUSHJ_STUBBABLE)
{
/* We've already checked whether we need a stub; use that
   knowledge.  */
if (mmix_elf_section_data (input_section)->pjs.stub_size[pjsno]
!= 0)
{
Elf_Internal_Rela relcpy;
if (mmix_elf_section_data (input_section)
->pjs.stub_size[pjsno] != MAX_PUSHJ_STUB_SIZE)
abort ();
/* There's already a PUSHJ insn there, so just fill in
   the offset bits to the stub.  */
if (mmix_final_link_relocate (elf_mmix_howto_table
+ R_MMIX_ADDR19,
input_section,
contents,
rel->r_offset,
0,
input_section
->output_section->vma
+ input_section->output_offset
+ size
+ mmix_elf_section_data (input_section)
->pjs.stub_offset,
NULL, NULL) != bfd_reloc_ok)
return FALSE;
/* Put a JMP insn at the stub; the target is filled in by the
   R_MMIX_JMP reloc.  */
bfd_put_32 (output_bfd, JMP_INSN_BYTE << 24,
contents
+ size
+ mmix_elf_section_data (input_section)
->pjs.stub_offset);
/* Change the reloc to be at the stub, and to be a full
   R_MMIX_JMP reloc.  */
rel->r_info = ELF64_R_INFO (r_symndx, R_MMIX_JMP);
rel->r_offset
= (size
+ mmix_elf_section_data (input_section)
->pjs.stub_offset);
mmix_elf_section_data (input_section)->pjs.stub_offset
+= MAX_PUSHJ_STUB_SIZE;
/* Shift this reloc to the end of the relocs to maintain
   the r_offset sorted reloc order.  */
relcpy = *rel;
memmove (rel, rel + 1, (char *) relend - (char *) rel);
relend[-1] = relcpy;
/* Back up one reloc, or else we'd skip the next reloc
   in turn.  */
rel--;
}
pjsno++;
}
continue;
}
howto = elf_mmix_howto_table + ELF64_R_TYPE (rel->r_info);
h = NULL;
sym = NULL;
sec = NULL;
if (r_symndx < symtab_hdr->sh_info)
{
sym = local_syms + r_symndx;
sec = local_sections [r_symndx];
relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
name = bfd_elf_string_from_elf_section (input_bfd,
symtab_hdr->sh_link,
sym->st_name);
if (name == NULL)
name = bfd_section_name (input_bfd, sec);
}
else
{
bfd_boolean unresolved_reloc;
RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
r_symndx, symtab_hdr, sym_hashes,
h, sec, relocation,
unresolved_reloc, undefined_signalled);
name = h->root.root.string;
}
r = mmix_final_link_relocate (howto, input_section,
contents, rel->r_offset,
rel->r_addend, relocation, name, sec);
if (r != bfd_reloc_ok)
{
bfd_boolean check_ok = TRUE;
const char * msg = (const char *) NULL;
switch (r)
{
case bfd_reloc_overflow:
check_ok = info->callbacks->reloc_overflow
(info, (h ? &h->root : NULL), name, howto->name,
(bfd_vma) 0, input_bfd, input_section, rel->r_offset);
break;
case bfd_reloc_undefined:
if (! undefined_signalled)
check_ok = info->callbacks->undefined_symbol
(info, name, input_bfd, input_section, rel->r_offset,
TRUE);
undefined_signalled = TRUE;
break;
case bfd_reloc_outofrange:
msg = _("internal error: out of range error");
break;
case bfd_reloc_notsupported:
msg = _("internal error: unsupported relocation error");
break;
case bfd_reloc_dangerous:
msg = _("internal error: dangerous relocation");
break;
default:
msg = _("internal error: unknown error");
break;
}
if (msg)
check_ok = info->callbacks->warning
(info, msg, name, input_bfd, input_section, rel->r_offset);
if (! check_ok)
return FALSE;
}
}
return TRUE;
}
/* Perform a single relocation.  By default we use the standard BFD
   routines.  A few relocs we have to do ourselves.  */
static bfd_reloc_status_type
mmix_final_link_relocate (howto, input_section, contents,
r_offset, r_addend, relocation, symname, symsec)
reloc_howto_type *howto;
asection *input_section;
bfd_byte *contents;
bfd_vma r_offset;
bfd_signed_vma r_addend;
bfd_vma relocation;
const char *symname;
asection *symsec;
{
bfd_reloc_status_type r = bfd_reloc_ok;
bfd_vma addr
= (input_section->output_section->vma
+ input_section->output_offset
+ r_offset);
bfd_signed_vma srel
= (bfd_signed_vma) relocation + r_addend;
switch (howto->type)
{
case R_MMIX_PUSHJ_STUBBABLE:
case R_MMIX_PUSHJ:
case R_MMIX_CBRANCH:
case R_MMIX_ADDR19:
case R_MMIX_GETA:
case R_MMIX_ADDR27:
case R_MMIX_JMP:
contents += r_offset;
srel -= (input_section->output_section->vma
+ input_section->output_offset
+ r_offset);
r = mmix_elf_perform_relocation (input_section, howto, contents,
addr, srel);
break;
case R_MMIX_BASE_PLUS_OFFSET:
if (symsec == NULL)
return bfd_reloc_undefined;
if (strcmp (bfd_get_section_name (symsec->owner, symsec),
MMIX_REG_CONTENTS_SECTION_NAME) == 0
|| strcmp (bfd_get_section_name (symsec->owner, symsec),
MMIX_REG_SECTION_NAME) == 0)
{
/* Note: this is separated out into two messages in order
   to ease the translation into other languages.  */
if (symname == NULL || *symname == 0)
(*_bfd_error_handler)
(_("%s: base-plus-offset relocation against register symbol: (unknown) in %s"),
bfd_get_filename (input_section->owner),
bfd_get_section_name (symsec->owner, symsec));
else
(*_bfd_error_handler)
(_("%s: base-plus-offset relocation against register symbol: %s in %s"),
bfd_get_filename (input_section->owner), symname,
bfd_get_section_name (symsec->owner, symsec));
return bfd_reloc_overflow;
}
goto do_mmix_reloc;
case R_MMIX_REG_OR_BYTE:
case R_MMIX_REG:
/* For these, we re-use the final relocation value of the register
   symbol, which is either relative to the register section and in
   the range 0..255, or is in the register contents section with vma
   regno * 8.  */
/* FIXME: Postpone section->scaling to mmix_elf_perform_relocation?  */
if (symsec == NULL)
return bfd_reloc_undefined;
if (strcmp (bfd_get_section_name (symsec->owner, symsec),
MMIX_REG_CONTENTS_SECTION_NAME) == 0)
{
if ((srel & 7) != 0 || srel < 32*8 || srel > 255*8)
{
/* The bfd_reloc_outofrange return value, though intuitively
   a better value, will not get us an error.  */
return bfd_reloc_overflow;
}
srel /= 8;
}
else if (strcmp (bfd_get_section_name (symsec->owner, symsec),
MMIX_REG_SECTION_NAME) == 0)
{
if (srel < 0 || srel > 255)
/* The bfd_reloc_outofrange return value, though intuitively a
   better value, will not get us an error.  */
return bfd_reloc_overflow;
}
else
{
/* Note: this is separated out into two messages in order
   to ease the translation into other languages.  */
if (symname == NULL || *symname == 0)
(*_bfd_error_handler)
(_("%s: register relocation against non-register symbol: (unknown) in %s"),
bfd_get_filename (input_section->owner),
bfd_get_section_name (symsec->owner, symsec));
else
(*_bfd_error_handler)
(_("%s: register relocation against non-register symbol: %s in %s"),
bfd_get_filename (input_section->owner), symname,
bfd_get_section_name (symsec->owner, symsec));
/* The bfd_reloc_outofrange return value, though intuitively a
   better value, will not get us an error.  */
return bfd_reloc_overflow;
}
do_mmix_reloc:
contents += r_offset;
r = mmix_elf_perform_relocation (input_section, howto, contents,
addr, srel);
break;
case R_MMIX_LOCAL:
/* This isn't a real relocation; it's an assertion that the
   final relocation value corresponds to a local register.  We
   ignore the actual relocation; nothing is changed.  */
{
asection *regsec
= bfd_get_section_by_name (input_section->output_section->owner,
MMIX_REG_CONTENTS_SECTION_NAME);
bfd_vma first_global;
/* Check that this is an absolute value, or a reference to the
   register contents section or the register (symbol) section.
   Absolute numbers can get here as undefined section.  Undefined
   symbols are signalled elsewhere, so there's no conflict in us
   accidentally handling it.  */
if (!bfd_is_abs_section (symsec)
&& !bfd_is_und_section (symsec)
&& strcmp (bfd_get_section_name (symsec->owner, symsec),
MMIX_REG_CONTENTS_SECTION_NAME) != 0
&& strcmp (bfd_get_section_name (symsec->owner, symsec),
MMIX_REG_SECTION_NAME) != 0)
{
(*_bfd_error_handler)
(_("%s: directive LOCAL valid only with a register or absolute value"),
bfd_get_filename (input_section->owner));
return bfd_reloc_overflow;
}
/* If we don't have a register contents section, then $255 is the
   first global register.  */
if (regsec == NULL)
first_global = 255;
else
{
first_global = bfd_get_section_vma (abfd, regsec) / 8;
if (strcmp (bfd_get_section_name (symsec->owner, symsec),
MMIX_REG_CONTENTS_SECTION_NAME) == 0)
{
if ((srel & 7) != 0 || srel < 32*8 || srel > 255*8)
/* The bfd_reloc_outofrange return value, though
   intuitively a better value, will not get us an error.  */
return bfd_reloc_overflow;
srel /= 8;
}
}
if ((bfd_vma) srel >= first_global)
{
(*_bfd_error_handler)
(_("%s: LOCAL directive: Register $%ld is not a local register. First global register is $%ld."),
bfd_get_filename (input_section->owner), (long) srel, (long) first_global);
return bfd_reloc_overflow;
}
}
r = bfd_reloc_ok;
break;
default:
r = _bfd_final_link_relocate (howto, input_section->owner, input_section,
contents, r_offset,
relocation, r_addend);
}
return r;
}
/* Return the section that should be marked against GC for a given
   relocation.  */
static asection *
mmix_elf_gc_mark_hook (sec, info, rel, h, sym)
asection *sec;
struct bfd_link_info *info ATTRIBUTE_UNUSED;
Elf_Internal_Rela *rel;
struct elf_link_hash_entry *h;
Elf_Internal_Sym *sym;
{
if (h != NULL)
{
switch (ELF64_R_TYPE (rel->r_info))
{
case R_MMIX_GNU_VTINHERIT:
case R_MMIX_GNU_VTENTRY:
break;
default:
switch (h->root.type)
{
case bfd_link_hash_defined:
case bfd_link_hash_defweak:
return h->root.u.def.section;
case bfd_link_hash_common:
return h->root.u.c.p->section;
default:
break;
}
}
}
else
return bfd_section_from_elf_index (sec->owner, sym->st_shndx);
return NULL;
}
/* Update the BPO reloc counts for a GC:ed section.  We could actually
   perform the allocation after GC, but there's no suitable hook between
   GC (or section merge) and the point when all input sections must be
   present.  Better to waste some memory and (perhaps) a little time.  */
static bfd_boolean
mmix_elf_gc_sweep_hook (abfd, info, sec, relocs)
bfd *abfd ATTRIBUTE_UNUSED;
struct bfd_link_info *info ATTRIBUTE_UNUSED;
asection *sec ATTRIBUTE_UNUSED;
const Elf_Internal_Rela *relocs ATTRIBUTE_UNUSED;
{
struct bpo_reloc_section_info *bpodata
= mmix_elf_section_data (sec)->bpo.reloc;
asection *allocated_gregs_section;
if (bpodata == NULL)
return TRUE;
allocated_gregs_section = bpodata->bpo_greg_section;
mmix_elf_section_data (allocated_gregs_section)->bpo.greg->n_bpo_relocs
-= bpodata->n_bpo_relocs_this_section;
return TRUE;
}
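/* qsort comparison function for relocs: keep relocs for the same insn
   together, with register relocs (R_MMIX_REG_OR_BYTE, R_MMIX_REG) before
   others, then ordered by r_offset.  */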
static int
mmix_elf_sort_relocs (p1, p2)
const PTR p1;
const PTR p2;
{
const Elf_Internal_Rela *r1 = (const Elf_Internal_Rela *) p1;
const Elf_Internal_Rela *r2 = (const Elf_Internal_Rela *) p2;
int r1_is_reg, r2_is_reg;
/* Sort primarily on r_offset rounded down to the insn, so all relocs for
   the same insns sort together.  */
if ((r1->r_offset & ~(bfd_vma) 3) > (r2->r_offset & ~(bfd_vma) 3))
return 1;
else if ((r1->r_offset & ~(bfd_vma) 3) < (r2->r_offset & ~(bfd_vma) 3))
return -1;
r1_is_reg
= (ELF64_R_TYPE (r1->r_info) == R_MMIX_REG_OR_BYTE
|| ELF64_R_TYPE (r1->r_info) == R_MMIX_REG);
r2_is_reg
= (ELF64_R_TYPE (r2->r_info) == R_MMIX_REG_OR_BYTE
|| ELF64_R_TYPE (r2->r_info) == R_MMIX_REG);
if (r1_is_reg != r2_is_reg)
return r2_is_reg - r1_is_reg;
if (r1->r_offset > r2->r_offset)
return 1;
else if (r1->r_offset < r2->r_offset)
return -1;
return 0;
}
static bfd_boolean
mmix_elf_check_common_relocs (abfd, info, sec, relocs)
bfd *abfd;
struct bfd_link_info *info;
asection *sec;
const Elf_Internal_Rela *relocs;
{
bfd *bpo_greg_owner = NULL;
asection *allocated_gregs_section = NULL;
struct bpo_greg_section_info *gregdata = NULL;
struct bpo_reloc_section_info *bpodata = NULL;
const Elf_Internal_Rela *rel;
const Elf_Internal_Rela *rel_end;
/* We currently have to abuse this COFF-specific member, since there's
   no target-machine-dedicated member.  There's no alternative outside
   the bfd_link_info struct; we can't specialize a hash-table since
   they're different between ELF and mmo.  */
bpo_greg_owner = (bfd *) info->base_file;
rel_end = relocs + sec->reloc_count;
for (rel = relocs; rel < rel_end; rel++)
{
switch (ELF64_R_TYPE (rel->r_info))
{
/* This relocation causes a GREG allocation.  We need to count
   them, and we need to create a section for them, so we need an
   object to fake as the owner of that section.  We can't use
   the ELF dynobj for this, since the ELF bits assume lots of
   DSO-related stuff if that member is non-NULL.  */
case R_MMIX_BASE_PLUS_OFFSET:
if (info->relocatable)
break;
if (bpo_greg_owner == NULL)
{
bpo_greg_owner = abfd;
info->base_file = (PTR) bpo_greg_owner;
}
if (allocated_gregs_section == NULL)
allocated_gregs_section
= bfd_get_section_by_name (bpo_greg_owner,
MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME);
if (allocated_gregs_section == NULL)
{
allocated_gregs_section
= bfd_make_section_with_flags (bpo_greg_owner,
MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME,
(SEC_HAS_CONTENTS
| SEC_IN_MEMORY
| SEC_LINKER_CREATED));
/* Setting SEC_ALLOC would make the section be
   treated like any other section, and we'd get errors for
   address overlap with the text section.  Let's set none of
   those flags, as that is what currently happens for usual
   GREG allocations, and that works.  */
if (allocated_gregs_section == NULL
|| !bfd_set_section_alignment (bpo_greg_owner,
allocated_gregs_section,
3))
return FALSE;
gregdata = (struct bpo_greg_section_info *)
bfd_zalloc (bpo_greg_owner, sizeof (struct bpo_greg_section_info));
if (gregdata == NULL)
return FALSE;
mmix_elf_section_data (allocated_gregs_section)->bpo.greg
= gregdata;
}
else if (gregdata == NULL)
gregdata
= mmix_elf_section_data (allocated_gregs_section)->bpo.greg;
if (bpodata == NULL)
{
/* It's not worth a separate pass to find an exact upper
   limit - just use the number of relocs.  */
bpodata = (struct bpo_reloc_section_info *)
bfd_alloc (bpo_greg_owner,
sizeof (struct bpo_reloc_section_info)
* (sec->reloc_count + 1));
if (bpodata == NULL)
return FALSE;
mmix_elf_section_data (sec)->bpo.reloc = bpodata;
bpodata->first_base_plus_offset_reloc
= bpodata->bpo_index
= gregdata->n_max_bpo_relocs;
bpodata->bpo_greg_section
= allocated_gregs_section;
bpodata->n_bpo_relocs_this_section = 0;
}
bpodata->n_bpo_relocs_this_section++;
gregdata->n_max_bpo_relocs++;
/* We don't get another chance to initialize this before GC; we've not
   set up any hook that runs before GC.  */
gregdata->n_bpo_relocs
= gregdata->n_max_bpo_relocs;
break;
case R_MMIX_PUSHJ_STUBBABLE:
mmix_elf_section_data (sec)->pjs.n_pushj_relocs++;
break;
}
}
/* Allocate per-reloc stub storage and initialize it to the maximum stub
   size.  */
if (mmix_elf_section_data (sec)->pjs.n_pushj_relocs != 0)
{
size_t i;
mmix_elf_section_data (sec)->pjs.stub_size
= bfd_alloc (abfd, mmix_elf_section_data (sec)->pjs.n_pushj_relocs
* sizeof (mmix_elf_section_data (sec)
->pjs.stub_size[0]));
if (mmix_elf_section_data (sec)->pjs.stub_size == NULL)
return FALSE;
for (i = 0; i < mmix_elf_section_data (sec)->pjs.n_pushj_relocs; i++)
mmix_elf_section_data (sec)->pjs.stub_size[i] = MAX_PUSHJ_STUB_SIZE;
}
return TRUE;
}
static bfd_boolean
mmix_elf_check_relocs (abfd, info, sec, relocs)
bfd *abfd;
struct bfd_link_info *info;
asection *sec;
const Elf_Internal_Rela *relocs;
{
Elf_Internal_Shdr *symtab_hdr;
struct elf_link_hash_entry **sym_hashes, **sym_hashes_end;
const Elf_Internal_Rela *rel;
const Elf_Internal_Rela *rel_end;
symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (abfd);
sym_hashes_end = sym_hashes + symtab_hdr->sh_size/sizeof(Elf64_External_Sym);
if (!elf_bad_symtab (abfd))
sym_hashes_end -= symtab_hdr->sh_info;
/* First we sort the relocs so that any register relocs come before
   expansion-relocs to the same insn.  FIXME: Not done for mmo.  */
qsort ((PTR) relocs, sec->reloc_count, sizeof (Elf_Internal_Rela),
mmix_elf_sort_relocs);
if (!mmix_elf_check_common_relocs (abfd, info, sec, relocs))
return FALSE;
if (info->relocatable)
return TRUE;
rel_end = relocs + sec->reloc_count;
for (rel = relocs; rel < rel_end; rel++)
{
struct elf_link_hash_entry *h;
unsigned long r_symndx;
r_symndx = ELF64_R_SYM (rel->r_info);
if (r_symndx < symtab_hdr->sh_info)
h = NULL;
else
{
h = sym_hashes[r_symndx - symtab_hdr->sh_info];
while (h->root.type == bfd_link_hash_indirect
|| h->root.type == bfd_link_hash_warning)
h = (struct elf_link_hash_entry *) h->root.u.i.link;
}
switch (ELF64_R_TYPE (rel->r_info))
{
/* This relocation describes the C++ object vtable hierarchy.
   Reconstruct it for later use during GC.  */
case R_MMIX_GNU_VTINHERIT:
if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
return FALSE;
break;
/* This relocation describes which C++ vtable entries are actually
   used.  Record for later use during GC.  */
case R_MMIX_GNU_VTENTRY:
if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
return FALSE;
break;
}
}
return TRUE;
}
/* Scan the relocs of all sections of an input bfd and register the BPO and
   PUSHJ-stub information; used when the ELF check_relocs hook doesn't get
   to run for the final output format (e.g. mmo).
   Copied from elf_link_add_object_symbols.  */
bfd_boolean
_bfd_mmix_check_all_relocs (abfd, info)
bfd *abfd;
struct bfd_link_info *info;
{
asection *o;
for (o = abfd->sections; o != NULL; o = o->next)
{
Elf_Internal_Rela *internal_relocs;
bfd_boolean ok;
if ((o->flags & SEC_RELOC) == 0
|| o->reloc_count == 0
|| ((info->strip == strip_all || info->strip == strip_debugger)
&& (o->flags & SEC_DEBUGGING) != 0)
|| bfd_is_abs_section (o->output_section))
continue;
internal_relocs
= _bfd_elf_link_read_relocs (abfd, o, (PTR) NULL,
(Elf_Internal_Rela *) NULL,
info->keep_memory);
if (internal_relocs == NULL)
return FALSE;
ok = mmix_elf_check_common_relocs (abfd, info, o, internal_relocs);
if (! info->keep_memory)
free (internal_relocs);
if (! ok)
return FALSE;
}
return TRUE;
}
/* Move symbols in the register contents section to instead be relative to
   the register section, and scale them down to correspond to the register
   number.  */
static bfd_boolean
mmix_elf_link_output_symbol_hook (info, name, sym, input_sec, h)
struct bfd_link_info *info ATTRIBUTE_UNUSED;
const char *name ATTRIBUTE_UNUSED;
Elf_Internal_Sym *sym;
asection *input_sec;
struct elf_link_hash_entry *h ATTRIBUTE_UNUSED;
{
if (input_sec != NULL
&& input_sec->name != NULL
&& ELF_ST_TYPE (sym->st_info) != STT_SECTION
&& strcmp (input_sec->name, MMIX_REG_CONTENTS_SECTION_NAME) == 0)
{
sym->st_value /= 8;
sym->st_shndx = SHN_REGISTER;
}
return TRUE;
}
/* Present symbols with st_shndx == SHN_REGISTER as being in a faked
   register section.
   Having a SHN_REGISTER and register section translates better to other
   formats (e.g. mmo) than for example a STT_REGISTER attribute.
   This section faking is based on a construct in elf32-mips.c.  */
static asection mmix_elf_reg_section;
static asymbol mmix_elf_reg_section_symbol;
static asymbol *mmix_elf_reg_section_symbol_ptr;
void
mmix_elf_symbol_processing (abfd, asym)
bfd *abfd ATTRIBUTE_UNUSED;
asymbol *asym;
{
elf_symbol_type *elfsym;
elfsym = (elf_symbol_type *) asym;
switch (elfsym->internal_elf_sym.st_shndx)
{
case SHN_REGISTER:
if (mmix_elf_reg_section.name == NULL)
{
mmix_elf_reg_section.name = MMIX_REG_SECTION_NAME;
mmix_elf_reg_section.flags = SEC_NO_FLAGS;
mmix_elf_reg_section.output_section = &mmix_elf_reg_section;
mmix_elf_reg_section.symbol = &mmix_elf_reg_section_symbol;
mmix_elf_reg_section.symbol_ptr_ptr = &mmix_elf_reg_section_symbol_ptr;
mmix_elf_reg_section_symbol.name = MMIX_REG_SECTION_NAME;
mmix_elf_reg_section_symbol.flags = BSF_SECTION_SYM;
mmix_elf_reg_section_symbol.section = &mmix_elf_reg_section;
mmix_elf_reg_section_symbol_ptr = &mmix_elf_reg_section_symbol;
}
asym->section = &mmix_elf_reg_section;
break;
default:
break;
}
}
/* Given a BFD section, try to locate the corresponding ELF section
   index.  */
static bfd_boolean
mmix_elf_section_from_bfd_section (abfd, sec, retval)
bfd * abfd ATTRIBUTE_UNUSED;
asection * sec;
int * retval;
{
if (strcmp (bfd_get_section_name (abfd, sec), MMIX_REG_SECTION_NAME) == 0)
*retval = SHN_REGISTER;
else
return FALSE;
return TRUE;
}
/* Hook called by the linker routine which adds symbols from an object
   file.  We must handle the special SHN_REGISTER section number here.

   We also check that we only have *one* each of the section-start
   symbols, since otherwise having two with the same value would cause
   them to be "merged", but with the contents serialized.  */
bfd_boolean
mmix_elf_add_symbol_hook (abfd, info, sym, namep, flagsp, secp, valp)
bfd *abfd;
struct bfd_link_info *info ATTRIBUTE_UNUSED;
Elf_Internal_Sym *sym;
const char **namep ATTRIBUTE_UNUSED;
flagword *flagsp ATTRIBUTE_UNUSED;
asection **secp;
bfd_vma *valp ATTRIBUTE_UNUSED;
{
if (sym->st_shndx == SHN_REGISTER)
{
*secp = bfd_make_section_old_way (abfd, MMIX_REG_SECTION_NAME);
(*secp)->flags |= SEC_LINKER_CREATED;
}
else if ((*namep)[0] == '_' && (*namep)[1] == '_' && (*namep)[2] == '.'
&& strncmp (*namep, MMIX_LOC_SECTION_START_SYMBOL_PREFIX,
strlen (MMIX_LOC_SECTION_START_SYMBOL_PREFIX)) == 0)
{
struct bfd_link_hash_entry *h = bfd_link_hash_lookup (info->hash,
*namep,
FALSE,
FALSE,
FALSE);
if (h != NULL && h->type != bfd_link_hash_undefined)
{
/* How do we get the asymbol (or really: the filename) from h?
   h->u.def.section->owner is NULL.  */
((*_bfd_error_handler)
(_("%s: Error: multiple definition of `%s'; start of %s is set in a earlier linked file\n"),
bfd_get_filename (abfd), *namep,
*namep + strlen (MMIX_LOC_SECTION_START_SYMBOL_PREFIX)));
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
}
return TRUE;
}
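/* In addition to the generic ELF local-label rules, treat symbols of the
   form "L<anything>:<digits>" as local.  */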
bfd_boolean
mmix_elf_is_local_label_name (abfd, name)
bfd *abfd;
const char *name;
{
const char *colpos;
int digits;
if (_bfd_elf_is_local_label_name (abfd, name))
return TRUE;
if (*name != 'L')
return FALSE;
colpos = strchr (name, ':');
if (colpos == NULL || strchr (colpos + 1, ':') != NULL)
return FALSE;
if (colpos[1] == 0)
return FALSE;
digits = strspn (colpos + 1, "0123456789");
return digits != 0 && colpos[1 + digits] == 0;
}
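/* The final linking function: handle the register section, call the
   generic ELF final link, then write out the linker-allocated register
   contents.  */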
bfd_boolean
mmix_elf_final_link (abfd, info)
bfd *abfd;
struct bfd_link_info *info;
{
/* We never output a register section, though we create one for
   temporary measures.  Check that nobody entered contents into it.  */
asection *reg_section;
reg_section = bfd_get_section_by_name (abfd, MMIX_REG_SECTION_NAME);
if (reg_section != NULL)
{
if (bfd_get_section_flags (abfd, reg_section) & SEC_HAS_CONTENTS)
_bfd_abort (__FILE__, __LINE__, _("Register section has contents\n"));
if (!bfd_section_removed_from_list (abfd, reg_section))
{
bfd_section_list_remove (abfd, reg_section);
--abfd->section_count;
}
}
if (! bfd_elf_final_link (abfd, info))
return FALSE;
/* Since this section is marked SEC_LINKER_CREATED, it isn't output by
   the regular linker machinery.  We do it here, like other targets with
   special sections.  */
if (info->base_file != NULL)
{
asection *greg_section
= bfd_get_section_by_name ((bfd *) info->base_file,
MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME);
if (!bfd_set_section_contents (abfd,
greg_section->output_section,
greg_section->contents,
(file_ptr) greg_section->output_offset,
greg_section->size))
return FALSE;
}
return TRUE;
}
/* We need to include the maximum size of PUSHJ-stubs in the initial
   section size.  This is expected to shrink during linker relaxation.  */
static void
mmix_set_relaxable_size (abfd, sec, ptr)
bfd *abfd ATTRIBUTE_UNUSED;
asection *sec;
void *ptr;
{
struct bfd_link_info *info = ptr;
/* Make sure we only do this for sections where we know we want this,
   otherwise we might end up resetting the size of COMMONs.  */
if (mmix_elf_section_data (sec)->pjs.n_pushj_relocs == 0)
return;
sec->rawsize = sec->size;
sec->size += (mmix_elf_section_data (sec)->pjs.n_pushj_relocs
* MAX_PUSHJ_STUB_SIZE);
/* For a relocatable link, account for the output stubs here, since there's
   no relaxation; compare the corresponding code in
   mmix_elf_relax_section.  */
if (info->relocatable && sec->output_section)
mmix_elf_section_data (sec->output_section)->pjs.stubs_size_sum
+= (mmix_elf_section_data (sec)->pjs.n_pushj_relocs
* MAX_PUSHJ_STUB_SIZE);
}
/* Initialize the GREG-allocation data and reserve worst-case stub space,
   matching the R_MMIX_BASE_PLUS_OFFSET relocs seen by the linker.  */
bfd_boolean
_bfd_mmix_before_linker_allocation (abfd, info)
bfd *abfd ATTRIBUTE_UNUSED;
struct bfd_link_info *info;
{
asection *bpo_gregs_section;
bfd *bpo_greg_owner;
struct bpo_greg_section_info *gregdata;
size_t n_gregs;
bfd_vma gregs_size;
size_t i;
size_t *bpo_reloc_indexes;
bfd *ibfd;
for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
bfd_map_over_sections (ibfd, mmix_set_relaxable_size, info);
/* The owner of the GREG section is stashed in info->base_file, set in
   mmix_elf_check_relocs when the first R_MMIX_BASE_PLUS_OFFSET is seen.
   If there is no such object, there was no R_MMIX_BASE_PLUS_OFFSET.  */
bpo_greg_owner = (bfd *) info->base_file;
if (bpo_greg_owner == NULL)
return TRUE;
bpo_gregs_section
= bfd_get_section_by_name (bpo_greg_owner,
MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME);
if (bpo_gregs_section == NULL)
return TRUE;
gregdata = mmix_elf_section_data (bpo_gregs_section)->bpo.greg;
if (gregdata == NULL)
return FALSE;
n_gregs = gregdata->n_bpo_relocs;
gregdata->n_allocated_bpo_gregs = n_gregs;
/* When this reaches zero during relaxation, all entries have been
   filled in and the size of the linker gregs can be calculated.  */
gregdata->n_remaining_bpo_relocs_this_relaxation_round = n_gregs;
gregs_size = n_gregs * 8;
if (!bfd_set_section_size (bpo_greg_owner, bpo_gregs_section, gregs_size))
return FALSE;
/* Allocate the arrays of allocation requests and indexes; they're filled
   in at relaxation time.  Note that we must use the max number ever noted
   for the array, since the index numbers were created before GC.  */
gregdata->reloc_request
= bfd_zalloc (bpo_greg_owner,
sizeof (struct bpo_reloc_request)
* gregdata->n_max_bpo_relocs);
gregdata->bpo_reloc_indexes
= bpo_reloc_indexes
= bfd_alloc (bpo_greg_owner,
gregdata->n_max_bpo_relocs
* sizeof (size_t));
if (bpo_reloc_indexes == NULL)
return FALSE;
for (i = 0; i < gregdata->n_max_bpo_relocs; i++)
{
bpo_reloc_indexes[i] = i;
gregdata->reloc_request[i].bpo_reloc_no = i;
}
return TRUE;
}
/* Fill in the contents of the linker-allocated gregs section.  Everything
   is calculated at this point; we just move the contents into place
   here.  */
bfd_boolean
_bfd_mmix_after_linker_allocation (abfd, link_info)
bfd *abfd ATTRIBUTE_UNUSED;
struct bfd_link_info *link_info;
{
asection *bpo_gregs_section;
bfd *bpo_greg_owner;
struct bpo_greg_section_info *gregdata;
size_t n_gregs;
size_t i, j;
size_t lastreg;
bfd_byte *contents;
/* The bpo_greg_owner bfd was stashed by mmix_elf_check_relocs
   when the first R_MMIX_BASE_PLUS_OFFSET is seen.  If there is no such
   object, there was no R_MMIX_BASE_PLUS_OFFSET.  */
bpo_greg_owner = (bfd *) link_info->base_file;
if (bpo_greg_owner == NULL)
return TRUE;
bpo_gregs_section
= bfd_get_section_by_name (bpo_greg_owner,
MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME);
/* If the link completed
   without any R_MMIX_BASE_PLUS_OFFSET seen, there will be no such
   section.  */
if (bpo_gregs_section == NULL)
return TRUE;
gregdata = mmix_elf_section_data (bpo_gregs_section)->bpo.greg;
if (gregdata == NULL)
return FALSE;
n_gregs = gregdata->n_allocated_bpo_gregs;
bpo_gregs_section->contents
= contents = bfd_alloc (bpo_greg_owner, bpo_gregs_section->size);
if (contents == NULL)
return FALSE;
/* Sanity check: if these numbers mismatch, some relocation has not been
   accounted for and the rest of gregdata is probably inconsistent.
   It's a bug, but it's more helpful to identify it than segfaulting
   below.  */
if (gregdata->n_remaining_bpo_relocs_this_relaxation_round
!= gregdata->n_bpo_relocs)
{
(*_bfd_error_handler)
(_("Internal inconsistency: remaining %u != max %u.\n\
Please report this bug."),
gregdata->n_remaining_bpo_relocs_this_relaxation_round,
gregdata->n_bpo_relocs);
return FALSE;
}
for (lastreg = 255, i = 0, j = 0; j < n_gregs; i++)
if (gregdata->reloc_request[i].regindex != lastreg)
{
bfd_put_64 (bpo_greg_owner, gregdata->reloc_request[i].value,
contents + j * 8);
lastreg = gregdata->reloc_request[i].regindex;
j++;
}
return TRUE;
}
/* Sort valid relocs to come before invalid relocs, then on increasing
   value.  */
static int
bpo_reloc_request_sort_fn (p1, p2)
const PTR p1;
const PTR p2;
{
const struct bpo_reloc_request *r1 = (const struct bpo_reloc_request *) p1;
const struct bpo_reloc_request *r2 = (const struct bpo_reloc_request *) p2;
/* Primarily, sort valid relocs before invalid
   ones.  */
if (r1->valid != r2->valid)
return r2->valid - r1->valid;
/* Then sort on the value.  Beware that we can't just subtract
   the values: the upper bits of the 64-bit value would be truncated on
   a host with 32-bit ints.  */
if (r1->value != r2->value)
return r1->value > r2->value ? 1 : -1;
/* Lastly, sort on the sequence number of the reloc, to get a stable
   sort.  The *addresses* aren't stable since items are swapped during
   sorting.  It depends on the qsort implementation if this actually
   happens.  */
return r1->bpo_reloc_no > r2->bpo_reloc_no
? 1 : (r1->bpo_reloc_no < r2->bpo_reloc_no ? -1 : 0);
}
/* For debug use only.  Dump the global register allocations resulting
   from base-plus-offset relocs.  */
void
mmix_dump_bpo_gregs (link_info, pf)
struct bfd_link_info *link_info;
bfd_error_handler_type pf;
{
bfd *bpo_greg_owner;
asection *bpo_gregs_section;
struct bpo_greg_section_info *gregdata;
unsigned int i;
if (link_info == NULL || link_info->base_file == NULL)
return;
bpo_greg_owner = (bfd *) link_info->base_file;
bpo_gregs_section
= bfd_get_section_by_name (bpo_greg_owner,
MMIX_LD_ALLOCATED_REG_CONTENTS_SECTION_NAME);
if (bpo_gregs_section == NULL)
return;
gregdata = mmix_elf_section_data (bpo_gregs_section)->bpo.greg;
if (gregdata == NULL)
return;
if (pf == NULL)
pf = _bfd_error_handler;
/* These strings are not translated; they're for debug use
   only and never displayed to an end user.  Should they escape, we
   surely want them in original.  */
(*pf) (" n_bpo_relocs: %u\n n_max_bpo_relocs: %u\n n_remain...round: %u\n\
n_allocated_bpo_gregs: %u\n", gregdata->n_bpo_relocs,
gregdata->n_max_bpo_relocs,
gregdata->n_remaining_bpo_relocs_this_relaxation_round,
gregdata->n_allocated_bpo_gregs);
if (gregdata->reloc_request)
for (i = 0; i < gregdata->n_max_bpo_relocs; i++)
(*pf) ("%4u (%4u)/%4u#%u: 0x%08lx%08lx r: %3u o: %3u\n",
i,
(gregdata->bpo_reloc_indexes != NULL
? gregdata->bpo_reloc_indexes[i] : (size_t) -1),
gregdata->reloc_request[i].bpo_reloc_no,
gregdata->reloc_request[i].valid,
(unsigned long) (gregdata->reloc_request[i].value >> 32),
(unsigned long) gregdata->reloc_request[i].value,
gregdata->reloc_request[i].regindex,
gregdata->reloc_request[i].offset);
}
/* This links all R_MMIX_BASE_PLUS_OFFSET relocs into a single array, and
   when the last such reloc is done, an index-array is sorted according to
   the values and iterated over to produce register numbers (indexed by 0
   from the first allocated register number) and offsets for use in real
   relocation.

   PUSHJ stub accounting is also done here.

   Symbol- and reloc-reading infrastructure copied from elf-m10200.c.  */
static bfd_boolean
mmix_elf_relax_section (abfd, sec, link_info, again)
bfd *abfd;
asection *sec;
struct bfd_link_info *link_info;
bfd_boolean *again;
{
Elf_Internal_Shdr *symtab_hdr;
Elf_Internal_Rela *internal_relocs;
Elf_Internal_Rela *irel, *irelend;
asection *bpo_gregs_section = NULL;
struct bpo_greg_section_info *gregdata;
struct bpo_reloc_section_info *bpodata
= mmix_elf_section_data (sec)->bpo.reloc;
/* Set to a wild value to silence compiler warnings and to help us
   spot a missing actual initialization.  */
size_t bpono = (size_t) -1;
size_t pjsno = 0;
bfd *bpo_greg_owner;
Elf_Internal_Sym *isymbuf = NULL;
bfd_size_type size = sec->rawsize ? sec->rawsize : sec->size;
mmix_elf_section_data (sec)->pjs.stubs_size_sum = 0;
*again = FALSE;
/* Nothing to do if this section doesn't have relocs, or
   if this is not a code section.  */
if ((sec->flags & SEC_RELOC) == 0
|| sec->reloc_count == 0
|| (sec->flags & SEC_CODE) == 0
|| (sec->flags & SEC_LINKER_CREATED) != 0
/* If there are neither BPO relocs nor stubbable PUSHJ relocs,
   then nothing to do.  */
|| (bpodata == NULL
&& mmix_elf_section_data (sec)->pjs.n_pushj_relocs == 0))
return TRUE;
symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
bpo_greg_owner = (bfd *) link_info->base_file;
if (bpodata != NULL)
{
bpo_gregs_section = bpodata->bpo_greg_section;
gregdata = mmix_elf_section_data (bpo_gregs_section)->bpo.greg;
bpono = bpodata->first_base_plus_offset_reloc;
}
else
gregdata = NULL;
internal_relocs
= _bfd_elf_link_read_relocs (abfd, sec, (PTR) NULL,
(Elf_Internal_Rela *) NULL,
link_info->keep_memory);
if (internal_relocs == NULL)
goto error_return;
irelend = internal_relocs + sec->reloc_count;
for (irel = internal_relocs; irel < irelend; irel++)
{
bfd_vma symval;
struct elf_link_hash_entry *h = NULL;
if (ELF64_R_TYPE (irel->r_info) != (int) R_MMIX_BASE_PLUS_OFFSET
&& ELF64_R_TYPE (irel->r_info) != (int) R_MMIX_PUSHJ_STUBBABLE)
continue;
/* We process relocs differently for a
   relocatable link (for one, we don't look at symbols), so we avoid
   mixing its code with that for the "normal" relaxation.  */
if (link_info->relocatable)
{
/* The only transformation for a relocatable link is to generate
   a full stub at the location of the stub calculated for the
   input section, if the relocated stub location, the end of the
   output section plus earlier stubs, cannot be reached.  Thus
   relocatable linking can only lead to worse code, but it still
   works.  */
if (ELF64_R_TYPE (irel->r_info) == R_MMIX_PUSHJ_STUBBABLE)
{
/* If we can reach the end of the output section plus
   any current stubs, then we don't need a stub for this
   reloc.  The relaxed order of output stub allocation may
   not exactly match the straightforward order, so we always
   assume presence of output stubs, which will allow
   relaxation only on relocations indifferent to the
   presence of output stub allocations for other relocations
   and thus the order of output stub allocation.  */
if (bfd_check_overflow (complain_overflow_signed,
19,
0,
bfd_arch_bits_per_address (abfd),
sec->output_section->rawsize
+ (mmix_elf_section_data (sec
->output_section)
->pjs.stubs_size_sum)
- (sec->output_offset + irel->r_offset)
- (mmix_elf_section_data (sec)
->pjs.stub_size[pjsno]
+ MAX_PUSHJ_STUB_SIZE))
== bfd_reloc_ok)
mmix_elf_section_data (sec)->pjs.stub_size[pjsno] = 0;
mmix_elf_section_data (sec)->pjs.stubs_size_sum
+= mmix_elf_section_data (sec)->pjs.stub_size[pjsno];
pjsno++;
}
continue;
}
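/* Get the value of the symbol referred to by the reloc.  */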
if (ELF64_R_SYM (irel->r_info) < symtab_hdr->sh_info)
{
Elf_Internal_Sym *isym;
asection *sym_sec;
if (isymbuf == NULL)
{
isymbuf = (Elf_Internal_Sym *) symtab_hdr->contents;
if (isymbuf == NULL)
isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
symtab_hdr->sh_info, 0,
NULL, NULL, NULL);
if (isymbuf == 0)
goto error_return;
}
isym = isymbuf + ELF64_R_SYM (irel->r_info);
if (isym->st_shndx == SHN_UNDEF)
sym_sec = bfd_und_section_ptr;
else if (isym->st_shndx == SHN_ABS)
sym_sec = bfd_abs_section_ptr;
else if (isym->st_shndx == SHN_COMMON)
sym_sec = bfd_com_section_ptr;
else
sym_sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
symval = (isym->st_value
+ sym_sec->output_section->vma
+ sym_sec->output_offset);
}
else
{
unsigned long indx;
indx = ELF64_R_SYM (irel->r_info) - symtab_hdr->sh_info;
h = elf_sym_hashes (abfd)[indx];
BFD_ASSERT (h != NULL);
if (h->root.type != bfd_link_hash_defined
&& h->root.type != bfd_link_hash_defweak)
{
/* This appears to be a reference to an undefined symbol.  Just
   ignore it--it will be caught by the regular reloc processing.
   We need to keep BPO reloc accounting consistent, though,
   else we'll abort instead of emitting an error message.  */
if (ELF64_R_TYPE (irel->r_info) == R_MMIX_BASE_PLUS_OFFSET
&& gregdata != NULL)
{
gregdata->n_remaining_bpo_relocs_this_relaxation_round--;
bpono++;
}
continue;
}
symval = (h->root.u.def.value
+ h->root.u.def.section->output_section->vma
+ h->root.u.def.section->output_offset);
}
if (ELF64_R_TYPE (irel->r_info) == (int) R_MMIX_PUSHJ_STUBBABLE)
{
bfd_vma value = symval + irel->r_addend;
bfd_vma dot
= (sec->output_section->vma
+ sec->output_offset
+ irel->r_offset);
bfd_vma stubaddr
= (sec->output_section->vma
+ sec->output_offset
+ size
+ mmix_elf_section_data (sec)->pjs.stubs_size_sum);
if ((value & 3) == 0
&& bfd_check_overflow (complain_overflow_signed,
19,
0,
bfd_arch_bits_per_address (abfd),
value - dot
- (value > dot
? mmix_elf_section_data (sec)
->pjs.stub_size[pjsno]
: 0))
== bfd_reloc_ok)
mmix_elf_section_data (sec)->pjs.stub_size[pjsno] = 0;
else
if ((value & 3) == 0
&& bfd_check_overflow (complain_overflow_signed,
27,
0,
bfd_arch_bits_per_address (abfd),
value - stubaddr
- (value > dot
? mmix_elf_section_data (sec)
->pjs.stub_size[pjsno] - 4
: 0))
== bfd_reloc_ok)
mmix_elf_section_data (sec)->pjs.stub_size[pjsno] = 4;
else
/* A full stub is needed; there's no point in trying to
   emit the intermediate sizes; those will only be useful for
   a >64M program assuming contiguous code.  */
mmix_elf_section_data (sec)->pjs.stub_size[pjsno]
= MAX_PUSHJ_STUB_SIZE;
mmix_elf_section_data (sec)->pjs.stubs_size_sum
+= mmix_elf_section_data (sec)->pjs.stub_size[pjsno];
pjsno++;
continue;
}
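/* This is a R_MMIX_BASE_PLUS_OFFSET reloc; record its value so a global
   register can be allocated to cover it.  */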
gregdata->reloc_request[gregdata->bpo_reloc_indexes[bpono]].value
= symval + irel->r_addend;
gregdata->reloc_request[gregdata->bpo_reloc_indexes[bpono++]].valid = TRUE;
gregdata->n_remaining_bpo_relocs_this_relaxation_round--;
}
/* If that was the last BPO-reloc, we have all the values; sort them and
   calculate how many registers we need to cover them.  Set the size of
   the linker gregs, and if the number of registers changed, indicate
   that we need to relax some more because we have more work to do.  */
if (gregdata != NULL
&& gregdata->n_remaining_bpo_relocs_this_relaxation_round == 0)
{
size_t i;
bfd_vma prev_base;
size_t regindex;
gregdata->n_remaining_bpo_relocs_this_relaxation_round
= gregdata->n_bpo_relocs;
qsort ((PTR) gregdata->reloc_request,
gregdata->n_max_bpo_relocs,
sizeof (struct bpo_reloc_request),
bpo_reloc_request_sort_fn);
/* Recalculate indexes.  When we find a change (however unlikely
   after the initial iteration), we know we need to relax again,
   since items in the GREG-array are sorted by increasing value and
   stored in the relaxation phase.  */
for (i = 0; i < gregdata->n_max_bpo_relocs; i++)
if (gregdata->bpo_reloc_indexes[gregdata->reloc_request[i].bpo_reloc_no]
!= i)
{
gregdata->bpo_reloc_indexes[gregdata->reloc_request[i].bpo_reloc_no]
= i;
*again = TRUE;
}
/* Allocate register numbers (indexing from 0).  Stop at the first
   non-valid reloc.  */
for (i = 0, regindex = 0, prev_base = gregdata->reloc_request[0].value;
i < gregdata->n_bpo_relocs;
i++)
{
if (gregdata->reloc_request[i].value > prev_base + 255)
{
regindex++;
prev_base = gregdata->reloc_request[i].value;
}
gregdata->reloc_request[i].regindex = regindex;
gregdata->reloc_request[i].offset
= gregdata->reloc_request[i].value - prev_base;
}
/* If the number of registers changed, we need to iterate again,
   because the size of the section has changed.  I'm not sure we
   actually need to do any adjustments since the shrinking happens
   at the start of this section, but better safe than sorry.  */
if (gregdata->n_allocated_bpo_gregs != regindex + 1)
{
gregdata->n_allocated_bpo_gregs = regindex + 1;
*again = TRUE;
}
bpo_gregs_section->size = (regindex + 1) * 8;
}
if (isymbuf != NULL && (unsigned char *) isymbuf != symtab_hdr->contents)
{
if (! link_info->keep_memory)
free (isymbuf);
else
{
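/* Cache the symbols for elf_link_input_bfd.  */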
symtab_hdr->contents = (unsigned char *) isymbuf;
}
}
if (internal_relocs != NULL
&& elf_section_data (sec)->relocs != internal_relocs)
free (internal_relocs);
if (sec->size < size + mmix_elf_section_data (sec)->pjs.stubs_size_sum)
abort ();
if (sec->size > size + mmix_elf_section_data (sec)->pjs.stubs_size_sum)
{
sec->size = size + mmix_elf_section_data (sec)->pjs.stubs_size_sum;
*again = TRUE;
}
return TRUE;
error_return:
if (isymbuf != NULL && (unsigned char *) isymbuf != symtab_hdr->contents)
free (isymbuf);
if (internal_relocs != NULL
&& elf_section_data (sec)->relocs != internal_relocs)
free (internal_relocs);
return FALSE;
}
#define ELF_ARCH bfd_arch_mmix
#define ELF_MACHINE_CODE EM_MMIX
/* An ELF_MAXPAGESIZE reflecting MMIX's huge address segments would be
   preferable.
   However, that's too much for something somewhere in the linker part of
BFD; perhaps the start-address has to be a non-zero multiple of this
number, or larger than this number. The symptom is that the linker
complains: "warning: allocated section `.text' not in segment". We
settle for 64k; the page-size used in examples is 8k.
#define ELF_MAXPAGESIZE 0x10000
Unfortunately, this causes excessive padding in the supposedly small
for-education programs that are the expected usage (where people would
inspect output). We stick to 256 bytes just to have *some* default
alignment. */
#define ELF_MAXPAGESIZE 0x100
#define TARGET_BIG_SYM bfd_elf64_mmix_vec
#define TARGET_BIG_NAME "elf64-mmix"
#define elf_info_to_howto_rel NULL
#define elf_info_to_howto mmix_info_to_howto_rela
#define elf_backend_relocate_section mmix_elf_relocate_section
#define elf_backend_gc_mark_hook mmix_elf_gc_mark_hook
#define elf_backend_gc_sweep_hook mmix_elf_gc_sweep_hook
#define elf_backend_link_output_symbol_hook \
mmix_elf_link_output_symbol_hook
#define elf_backend_add_symbol_hook mmix_elf_add_symbol_hook
#define elf_backend_check_relocs mmix_elf_check_relocs
#define elf_backend_symbol_processing mmix_elf_symbol_processing
#define bfd_elf64_bfd_is_local_label_name \
mmix_elf_is_local_label_name
#define elf_backend_may_use_rel_p 0
#define elf_backend_may_use_rela_p 1
#define elf_backend_default_use_rela_p 1
#define elf_backend_can_gc_sections 1
#define elf_backend_section_from_bfd_section \
mmix_elf_section_from_bfd_section
#define bfd_elf64_new_section_hook mmix_elf_new_section_hook
#define bfd_elf64_bfd_final_link mmix_elf_final_link
#define bfd_elf64_bfd_relax_section mmix_elf_relax_section
#include "elf64-target.h"